commit 0f60cb847bc6372f9bfc91435f7a59ab6e55ba0c Author: lw-everestlinux Date: Mon Dec 5 16:58:59 2022 -0500 init diff --git a/README b/README new file mode 100644 index 0000000..e69de29 diff --git a/bin/bomtool b/bin/bomtool new file mode 100755 index 0000000..8decdb2 Binary files /dev/null and b/bin/bomtool differ diff --git a/bin/pkg-config b/bin/pkg-config new file mode 120000 index 0000000..243cd47 --- /dev/null +++ b/bin/pkg-config @@ -0,0 +1 @@ +pkgconf \ No newline at end of file diff --git a/bin/pkgconf b/bin/pkgconf new file mode 100755 index 0000000..76eafa9 Binary files /dev/null and b/bin/pkgconf differ diff --git a/bin/x86_64-linux-musl-addr2line b/bin/x86_64-linux-musl-addr2line new file mode 100755 index 0000000..3013945 Binary files /dev/null and b/bin/x86_64-linux-musl-addr2line differ diff --git a/bin/x86_64-linux-musl-ar b/bin/x86_64-linux-musl-ar new file mode 100755 index 0000000..dfd2c4d Binary files /dev/null and b/bin/x86_64-linux-musl-ar differ diff --git a/bin/x86_64-linux-musl-as b/bin/x86_64-linux-musl-as new file mode 100755 index 0000000..8cb5ed8 Binary files /dev/null and b/bin/x86_64-linux-musl-as differ diff --git a/bin/x86_64-linux-musl-c++ b/bin/x86_64-linux-musl-c++ new file mode 100755 index 0000000..588cf5d Binary files /dev/null and b/bin/x86_64-linux-musl-c++ differ diff --git a/bin/x86_64-linux-musl-c++filt b/bin/x86_64-linux-musl-c++filt new file mode 100755 index 0000000..efa5362 Binary files /dev/null and b/bin/x86_64-linux-musl-c++filt differ diff --git a/bin/x86_64-linux-musl-cpp b/bin/x86_64-linux-musl-cpp new file mode 100755 index 0000000..c31e945 Binary files /dev/null and b/bin/x86_64-linux-musl-cpp differ diff --git a/bin/x86_64-linux-musl-elfedit b/bin/x86_64-linux-musl-elfedit new file mode 100755 index 0000000..bf35163 Binary files /dev/null and b/bin/x86_64-linux-musl-elfedit differ diff --git a/bin/x86_64-linux-musl-g++ b/bin/x86_64-linux-musl-g++ new file mode 100755 index 0000000..588cf5d Binary files 
/dev/null and b/bin/x86_64-linux-musl-g++ differ diff --git a/bin/x86_64-linux-musl-gcc b/bin/x86_64-linux-musl-gcc new file mode 100755 index 0000000..4757224 Binary files /dev/null and b/bin/x86_64-linux-musl-gcc differ diff --git a/bin/x86_64-linux-musl-gcc-12.2.0 b/bin/x86_64-linux-musl-gcc-12.2.0 new file mode 100755 index 0000000..4757224 Binary files /dev/null and b/bin/x86_64-linux-musl-gcc-12.2.0 differ diff --git a/bin/x86_64-linux-musl-gcc-ar b/bin/x86_64-linux-musl-gcc-ar new file mode 100755 index 0000000..bf25992 Binary files /dev/null and b/bin/x86_64-linux-musl-gcc-ar differ diff --git a/bin/x86_64-linux-musl-gcc-nm b/bin/x86_64-linux-musl-gcc-nm new file mode 100755 index 0000000..1fc64a6 Binary files /dev/null and b/bin/x86_64-linux-musl-gcc-nm differ diff --git a/bin/x86_64-linux-musl-gcc-ranlib b/bin/x86_64-linux-musl-gcc-ranlib new file mode 100755 index 0000000..d1c2526 Binary files /dev/null and b/bin/x86_64-linux-musl-gcc-ranlib differ diff --git a/bin/x86_64-linux-musl-gcov b/bin/x86_64-linux-musl-gcov new file mode 100755 index 0000000..27aa126 Binary files /dev/null and b/bin/x86_64-linux-musl-gcov differ diff --git a/bin/x86_64-linux-musl-gcov-dump b/bin/x86_64-linux-musl-gcov-dump new file mode 100755 index 0000000..3aae0d2 Binary files /dev/null and b/bin/x86_64-linux-musl-gcov-dump differ diff --git a/bin/x86_64-linux-musl-gcov-tool b/bin/x86_64-linux-musl-gcov-tool new file mode 100755 index 0000000..3ae87ce Binary files /dev/null and b/bin/x86_64-linux-musl-gcov-tool differ diff --git a/bin/x86_64-linux-musl-ld b/bin/x86_64-linux-musl-ld new file mode 100755 index 0000000..4987dcd Binary files /dev/null and b/bin/x86_64-linux-musl-ld differ diff --git a/bin/x86_64-linux-musl-ld.bfd b/bin/x86_64-linux-musl-ld.bfd new file mode 100755 index 0000000..4987dcd Binary files /dev/null and b/bin/x86_64-linux-musl-ld.bfd differ diff --git a/bin/x86_64-linux-musl-lto-dump b/bin/x86_64-linux-musl-lto-dump new file mode 100755 index 
0000000..adfe057 Binary files /dev/null and b/bin/x86_64-linux-musl-lto-dump differ diff --git a/bin/x86_64-linux-musl-nm b/bin/x86_64-linux-musl-nm new file mode 100755 index 0000000..2f9b06b Binary files /dev/null and b/bin/x86_64-linux-musl-nm differ diff --git a/bin/x86_64-linux-musl-objcopy b/bin/x86_64-linux-musl-objcopy new file mode 100755 index 0000000..fc5d1bd Binary files /dev/null and b/bin/x86_64-linux-musl-objcopy differ diff --git a/bin/x86_64-linux-musl-objdump b/bin/x86_64-linux-musl-objdump new file mode 100755 index 0000000..249d63f Binary files /dev/null and b/bin/x86_64-linux-musl-objdump differ diff --git a/bin/x86_64-linux-musl-pkg-config b/bin/x86_64-linux-musl-pkg-config new file mode 120000 index 0000000..243cd47 --- /dev/null +++ b/bin/x86_64-linux-musl-pkg-config @@ -0,0 +1 @@ +pkgconf \ No newline at end of file diff --git a/bin/x86_64-linux-musl-pkgconf b/bin/x86_64-linux-musl-pkgconf new file mode 120000 index 0000000..243cd47 --- /dev/null +++ b/bin/x86_64-linux-musl-pkgconf @@ -0,0 +1 @@ +pkgconf \ No newline at end of file diff --git a/bin/x86_64-linux-musl-ranlib b/bin/x86_64-linux-musl-ranlib new file mode 100755 index 0000000..918b643 Binary files /dev/null and b/bin/x86_64-linux-musl-ranlib differ diff --git a/bin/x86_64-linux-musl-readelf b/bin/x86_64-linux-musl-readelf new file mode 100755 index 0000000..3b5b5f7 Binary files /dev/null and b/bin/x86_64-linux-musl-readelf differ diff --git a/bin/x86_64-linux-musl-size b/bin/x86_64-linux-musl-size new file mode 100755 index 0000000..913ca35 Binary files /dev/null and b/bin/x86_64-linux-musl-size differ diff --git a/bin/x86_64-linux-musl-strings b/bin/x86_64-linux-musl-strings new file mode 100755 index 0000000..79e80e0 Binary files /dev/null and b/bin/x86_64-linux-musl-strings differ diff --git a/bin/x86_64-linux-musl-strip b/bin/x86_64-linux-musl-strip new file mode 100755 index 0000000..0ffcc21 Binary files /dev/null and b/bin/x86_64-linux-musl-strip differ diff --git 
a/include/pkgconf/libpkgconf/bsdstubs.h b/include/pkgconf/libpkgconf/bsdstubs.h new file mode 100644 index 0000000..4e52d41 --- /dev/null +++ b/include/pkgconf/libpkgconf/bsdstubs.h @@ -0,0 +1,34 @@ +/* + * bsdstubs.h + * Header for stub BSD function prototypes if unavailable on a specific platform. + * + * Copyright (c) 2012 William Pitcock . + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * This software is provided 'as is' and without any warranty, express or + * implied. In no event shall the authors be liable for any damages arising + * from the use of this software. + */ + +#ifndef LIBPKGCONF_BSDSTUBS_H +#define LIBPKGCONF_BSDSTUBS_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +PKGCONF_API extern size_t pkgconf_strlcpy(char *dst, const char *src, size_t siz); +PKGCONF_API extern size_t pkgconf_strlcat(char *dst, const char *src, size_t siz); +PKGCONF_API extern char *pkgconf_strndup(const char *src, size_t len); +PKGCONF_API extern void *pkgconf_reallocarray(void *ptr, size_t m, size_t n); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/pkgconf/libpkgconf/iter.h b/include/pkgconf/libpkgconf/iter.h new file mode 100644 index 0000000..199d299 --- /dev/null +++ b/include/pkgconf/libpkgconf/iter.h @@ -0,0 +1,113 @@ +/* + * iter.h + * Linked lists and iterators. + * + * Copyright (c) 2013 pkgconf authors (see AUTHORS). + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * This software is provided 'as is' and without any warranty, express or + * implied. In no event shall the authors be liable for any damages arising + * from the use of this software. 
+ */ + +#ifndef LIBPKGCONF_ITER_H +#define LIBPKGCONF_ITER_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct pkgconf_node_ pkgconf_node_t; + +struct pkgconf_node_ { + pkgconf_node_t *prev, *next; + void *data; +}; + +typedef struct { + pkgconf_node_t *head, *tail; + size_t length; +} pkgconf_list_t; + +#define PKGCONF_LIST_INITIALIZER { NULL, NULL, 0 } + +static inline void +pkgconf_list_zero(pkgconf_list_t *list) +{ + list->head = NULL; + list->tail = NULL; + list->length = 0; +} + +static inline void +pkgconf_node_insert(pkgconf_node_t *node, void *data, pkgconf_list_t *list) +{ + pkgconf_node_t *tnode; + + node->data = data; + + if (list->head == NULL) + { + list->head = node; + list->tail = node; + list->length = 1; + return; + } + + tnode = list->head; + + node->next = tnode; + tnode->prev = node; + + list->head = node; + list->length++; +} + +static inline void +pkgconf_node_insert_tail(pkgconf_node_t *node, void *data, pkgconf_list_t *list) +{ + pkgconf_node_t *tnode; + + node->data = data; + + if (list->tail == NULL) + { + list->head = node; + list->tail = node; + list->length = 1; + return; + } + + tnode = list->tail; + + node->prev = tnode; + tnode->next = node; + + list->tail = node; + list->length++; +} + +static inline void +pkgconf_node_delete(pkgconf_node_t *node, pkgconf_list_t *list) +{ + list->length--; + + if (node->prev == NULL) + list->head = node->next; + else + node->prev->next = node->next; + + if (node->next == NULL) + list->tail = node->prev; + else + node->next->prev = node->prev; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/pkgconf/libpkgconf/libpkgconf-api.h b/include/pkgconf/libpkgconf/libpkgconf-api.h new file mode 100644 index 0000000..a924c80 --- /dev/null +++ b/include/pkgconf/libpkgconf/libpkgconf-api.h @@ -0,0 +1,19 @@ +#ifndef LIBPKGCONF_LIBPKGCONF_API_H +#define LIBPKGCONF_LIBPKGCONF_API_H + +/* Makefile.am specifies visibility using the libtool option -export-symbols-regex '^pkgconf_' + * 
Unfortunately, that is not available when building with meson, so use attributes instead. + */ +#if defined(PKGCONFIG_IS_STATIC) +# define PKGCONF_API +#elif defined(_WIN32) || defined(_WIN64) +# if defined(LIBPKGCONF_EXPORT) || defined(DLL_EXPORT) +# define PKGCONF_API __declspec(dllexport) +# else +# define PKGCONF_API __declspec(dllimport) +# endif +#else +# define PKGCONF_API __attribute__((visibility("default"))) +#endif + +#endif diff --git a/include/pkgconf/libpkgconf/libpkgconf.h b/include/pkgconf/libpkgconf/libpkgconf.h new file mode 100644 index 0000000..1dd443d --- /dev/null +++ b/include/pkgconf/libpkgconf/libpkgconf.h @@ -0,0 +1,417 @@ +/* + * libpkgconf.h + * Global include file for everything in libpkgconf. + * + * Copyright (c) 2011, 2015 pkgconf authors (see AUTHORS). + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * This software is provided 'as is' and without any warranty, express or + * implied. In no event shall the authors be liable for any damages arising + * from the use of this software. 
+ */ + +#ifndef LIBPKGCONF__LIBPKGCONF_H +#define LIBPKGCONF__LIBPKGCONF_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* pkg-config uses ';' on win32 as ':' is part of path */ +#ifdef _WIN32 +#define PKG_CONFIG_PATH_SEP_S ";" +#else +#define PKG_CONFIG_PATH_SEP_S ":" +#endif + +#ifdef _WIN32 +#define PKG_DIR_SEP_S '\\' +#else +#define PKG_DIR_SEP_S '/' +#endif + +#ifdef _WIN32 +#define realpath(N,R) _fullpath((R),(N),_MAX_PATH) +#endif + +#define PKGCONF_BUFSIZE (65535) + +typedef enum { + PKGCONF_CMP_NOT_EQUAL, + PKGCONF_CMP_ANY, + PKGCONF_CMP_LESS_THAN, + PKGCONF_CMP_LESS_THAN_EQUAL, + PKGCONF_CMP_EQUAL, + PKGCONF_CMP_GREATER_THAN, + PKGCONF_CMP_GREATER_THAN_EQUAL +} pkgconf_pkg_comparator_t; + +#define PKGCONF_CMP_COUNT 7 + +typedef struct pkgconf_pkg_ pkgconf_pkg_t; +typedef struct pkgconf_dependency_ pkgconf_dependency_t; +typedef struct pkgconf_tuple_ pkgconf_tuple_t; +typedef struct pkgconf_fragment_ pkgconf_fragment_t; +typedef struct pkgconf_path_ pkgconf_path_t; +typedef struct pkgconf_client_ pkgconf_client_t; +typedef struct pkgconf_cross_personality_ pkgconf_cross_personality_t; + +#define PKGCONF_ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) + +#define PKGCONF_FOREACH_LIST_ENTRY(head, value) \ + for ((value) = (head); (value) != NULL; (value) = (value)->next) + +#define PKGCONF_FOREACH_LIST_ENTRY_SAFE(head, nextiter, value) \ + for ((value) = (head), (nextiter) = (head) != NULL ? (head)->next : NULL; (value) != NULL; (value) = (nextiter), (nextiter) = (nextiter) != NULL ? 
(nextiter)->next : NULL) + +#define PKGCONF_FOREACH_LIST_ENTRY_REVERSE(tail, value) \ + for ((value) = (tail); (value) != NULL; (value) = (value)->prev) + +#define LIBPKGCONF_VERSION 10903 +#define LIBPKGCONF_VERSION_STR "1.9.3" + +struct pkgconf_fragment_ { + pkgconf_node_t iter; + + char type; + char *data; + + bool merged; +}; + +struct pkgconf_dependency_ { + pkgconf_node_t iter; + + char *package; + pkgconf_pkg_comparator_t compare; + char *version; + pkgconf_pkg_t *parent; + pkgconf_pkg_t *match; + + unsigned int flags; + + int refcount; + pkgconf_client_t *owner; +}; + +struct pkgconf_tuple_ { + pkgconf_node_t iter; + + char *key; + char *value; + + unsigned int flags; +}; + +#define PKGCONF_PKG_TUPLEF_OVERRIDE 0x1 + +struct pkgconf_path_ { + pkgconf_node_t lnode; + + char *path; + void *handle_path; + void *handle_device; +}; + +#define PKGCONF_PKG_PROPF_NONE 0x00 +#define PKGCONF_PKG_PROPF_STATIC 0x01 +#define PKGCONF_PKG_PROPF_CACHED 0x02 +#define PKGCONF_PKG_PROPF_UNINSTALLED 0x08 +#define PKGCONF_PKG_PROPF_VIRTUAL 0x10 + +struct pkgconf_pkg_ { + int refcount; + char *id; + char *filename; + char *realname; + char *version; + char *description; + char *url; + char *pc_filedir; + char *license; + char *maintainer; + char *copyright; + + pkgconf_list_t libs; + pkgconf_list_t libs_private; + pkgconf_list_t cflags; + pkgconf_list_t cflags_private; + + pkgconf_list_t required; /* this used to be requires but that is now a reserved keyword */ + pkgconf_list_t requires_private; + pkgconf_list_t conflicts; + pkgconf_list_t provides; + + pkgconf_list_t vars; + + unsigned int flags; + + pkgconf_client_t *owner; + + /* these resources are owned by the package and do not need special management, + * under no circumstance attempt to allocate or free objects belonging to these pointers + */ + pkgconf_tuple_t *orig_prefix; + pkgconf_tuple_t *prefix; + + uint64_t serial; + + size_t hits; +}; + +typedef bool (*pkgconf_pkg_iteration_func_t)(const pkgconf_pkg_t *pkg, void 
*data); +typedef void (*pkgconf_pkg_traverse_func_t)(pkgconf_client_t *client, pkgconf_pkg_t *pkg, void *data); +typedef bool (*pkgconf_queue_apply_func_t)(pkgconf_client_t *client, pkgconf_pkg_t *world, void *data, int maxdepth); +typedef bool (*pkgconf_error_handler_func_t)(const char *msg, const pkgconf_client_t *client, void *data); + +struct pkgconf_client_ { + pkgconf_list_t dir_list; + + pkgconf_list_t filter_libdirs; + pkgconf_list_t filter_includedirs; + + pkgconf_list_t global_vars; + + void *error_handler_data; + void *warn_handler_data; + void *trace_handler_data; + + pkgconf_error_handler_func_t error_handler; + pkgconf_error_handler_func_t warn_handler; + pkgconf_error_handler_func_t trace_handler; + + FILE *auditf; + + char *sysroot_dir; + char *buildroot_dir; + + unsigned int flags; + + char *prefix_varname; + + bool already_sent_notice; + + uint64_t serial; + + pkgconf_pkg_t **cache_table; + size_t cache_count; +}; + +struct pkgconf_cross_personality_ { + const char *name; + + pkgconf_list_t dir_list; + + pkgconf_list_t filter_libdirs; + pkgconf_list_t filter_includedirs; + + char *sysroot_dir; + + bool want_default_static; + bool want_default_pure; +}; + +/* client.c */ +PKGCONF_API void pkgconf_client_init(pkgconf_client_t *client, pkgconf_error_handler_func_t error_handler, void *error_handler_data, const pkgconf_cross_personality_t *personality); +PKGCONF_API pkgconf_client_t * pkgconf_client_new(pkgconf_error_handler_func_t error_handler, void *error_handler_data, const pkgconf_cross_personality_t *personality); +PKGCONF_API void pkgconf_client_deinit(pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_free(pkgconf_client_t *client); +PKGCONF_API const char *pkgconf_client_get_sysroot_dir(const pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_set_sysroot_dir(pkgconf_client_t *client, const char *sysroot_dir); +PKGCONF_API const char *pkgconf_client_get_buildroot_dir(const pkgconf_client_t *client); +PKGCONF_API void 
pkgconf_client_set_buildroot_dir(pkgconf_client_t *client, const char *buildroot_dir); +PKGCONF_API unsigned int pkgconf_client_get_flags(const pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_set_flags(pkgconf_client_t *client, unsigned int flags); +PKGCONF_API const char *pkgconf_client_get_prefix_varname(const pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_set_prefix_varname(pkgconf_client_t *client, const char *prefix_varname); +PKGCONF_API pkgconf_error_handler_func_t pkgconf_client_get_warn_handler(const pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_set_warn_handler(pkgconf_client_t *client, pkgconf_error_handler_func_t warn_handler, void *warn_handler_data); +PKGCONF_API pkgconf_error_handler_func_t pkgconf_client_get_error_handler(const pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_set_error_handler(pkgconf_client_t *client, pkgconf_error_handler_func_t error_handler, void *error_handler_data); +PKGCONF_API pkgconf_error_handler_func_t pkgconf_client_get_trace_handler(const pkgconf_client_t *client); +PKGCONF_API void pkgconf_client_set_trace_handler(pkgconf_client_t *client, pkgconf_error_handler_func_t trace_handler, void *trace_handler_data); +PKGCONF_API void pkgconf_client_dir_list_build(pkgconf_client_t *client, const pkgconf_cross_personality_t *personality); + +/* personality.c */ +PKGCONF_API pkgconf_cross_personality_t *pkgconf_cross_personality_default(void); +PKGCONF_API pkgconf_cross_personality_t *pkgconf_cross_personality_find(const char *triplet); +PKGCONF_API void pkgconf_cross_personality_deinit(pkgconf_cross_personality_t *personality); + +#define PKGCONF_IS_MODULE_SEPARATOR(c) ((c) == ',' || isspace ((unsigned int)(c))) +#define PKGCONF_IS_OPERATOR_CHAR(c) ((c) == '<' || (c) == '>' || (c) == '!' 
|| (c) == '=') + +#define PKGCONF_PKG_PKGF_NONE 0x0000 +#define PKGCONF_PKG_PKGF_SEARCH_PRIVATE 0x0001 +#define PKGCONF_PKG_PKGF_ENV_ONLY 0x0002 +#define PKGCONF_PKG_PKGF_NO_UNINSTALLED 0x0004 +#define PKGCONF_PKG_PKGF_SKIP_ROOT_VIRTUAL 0x0008 +#define PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS 0x0010 +#define PKGCONF_PKG_PKGF_SKIP_CONFLICTS 0x0020 +#define PKGCONF_PKG_PKGF_NO_CACHE 0x0040 +#define PKGCONF_PKG_PKGF_SKIP_ERRORS 0x0080 +#define PKGCONF_PKG_PKGF_ITER_PKG_IS_PRIVATE 0x0100 +#define PKGCONF_PKG_PKGF_SKIP_PROVIDES 0x0200 +#define PKGCONF_PKG_PKGF_REDEFINE_PREFIX 0x0400 +#define PKGCONF_PKG_PKGF_DONT_RELOCATE_PATHS 0x0800 +#define PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS 0x1000 +#define PKGCONF_PKG_PKGF_DONT_FILTER_INTERNAL_CFLAGS 0x2000 +#define PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS 0x4000 +#define PKGCONF_PKG_PKGF_FDO_SYSROOT_RULES 0x8000 +#define PKGCONF_PKG_PKGF_PKGCONF1_SYSROOT_RULES 0x10000 + +#define PKGCONF_PKG_DEPF_INTERNAL 0x1 +#define PKGCONF_PKG_DEPF_PRIVATE 0x2 + +#define PKGCONF_PKG_ERRF_OK 0x0 +#define PKGCONF_PKG_ERRF_PACKAGE_NOT_FOUND 0x1 +#define PKGCONF_PKG_ERRF_PACKAGE_VER_MISMATCH 0x2 +#define PKGCONF_PKG_ERRF_PACKAGE_CONFLICT 0x4 +#define PKGCONF_PKG_ERRF_DEPGRAPH_BREAK 0x8 + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#define PRINTFLIKE(fmtarg, firstvararg) \ + __attribute__((__format__ (__printf__, fmtarg, firstvararg))) +#define DEPRECATED \ + __attribute__((deprecated)) +#else +#define PRINTFLIKE(fmtarg, firstvararg) +#define DEPRECATED +#endif /* defined(__INTEL_COMPILER) || defined(__GNUC__) */ + +/* parser.c */ +typedef void (*pkgconf_parser_operand_func_t)(void *data, const size_t lineno, const char *key, const char *value); +typedef void (*pkgconf_parser_warn_func_t)(void *data, const char *fmt, ...); + +PKGCONF_API void pkgconf_parser_parse(FILE *f, void *data, const pkgconf_parser_operand_func_t *ops, const pkgconf_parser_warn_func_t warnfunc, const char *filename); + +/* pkg.c */ +PKGCONF_API bool 
pkgconf_error(const pkgconf_client_t *client, const char *format, ...) PRINTFLIKE(2, 3); +PKGCONF_API bool pkgconf_warn(const pkgconf_client_t *client, const char *format, ...) PRINTFLIKE(2, 3); +PKGCONF_API bool pkgconf_trace(const pkgconf_client_t *client, const char *filename, size_t lineno, const char *funcname, const char *format, ...) PRINTFLIKE(5, 6); +PKGCONF_API bool pkgconf_default_error_handler(const char *msg, const pkgconf_client_t *client, void *data); + +#ifndef PKGCONF_LITE +#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#define PKGCONF_TRACE(client, ...) do { \ + pkgconf_trace(client, __FILE__, __LINE__, __PRETTY_FUNCTION__, __VA_ARGS__); \ + } while (0) +#else +#define PKGCONF_TRACE(client, ...) do { \ + pkgconf_trace(client, __FILE__, __LINE__, __func__, __VA_ARGS__); \ + } while (0) +#endif +#else +#define PKGCONF_TRACE(client, ...) +#endif + +PKGCONF_API pkgconf_pkg_t *pkgconf_pkg_ref(pkgconf_client_t *client, pkgconf_pkg_t *pkg); +PKGCONF_API void pkgconf_pkg_unref(pkgconf_client_t *client, pkgconf_pkg_t *pkg); +PKGCONF_API void pkgconf_pkg_free(pkgconf_client_t *client, pkgconf_pkg_t *pkg); +PKGCONF_API pkgconf_pkg_t *pkgconf_pkg_find(pkgconf_client_t *client, const char *name); +PKGCONF_API unsigned int pkgconf_pkg_traverse(pkgconf_client_t *client, pkgconf_pkg_t *root, pkgconf_pkg_traverse_func_t func, void *data, int maxdepth, unsigned int skip_flags); +PKGCONF_API unsigned int pkgconf_pkg_verify_graph(pkgconf_client_t *client, pkgconf_pkg_t *root, int depth); +PKGCONF_API pkgconf_pkg_t *pkgconf_pkg_verify_dependency(pkgconf_client_t *client, pkgconf_dependency_t *pkgdep, unsigned int *eflags); +PKGCONF_API const char *pkgconf_pkg_get_comparator(const pkgconf_dependency_t *pkgdep); +PKGCONF_API unsigned int pkgconf_pkg_cflags(pkgconf_client_t *client, pkgconf_pkg_t *root, pkgconf_list_t *list, int maxdepth); +PKGCONF_API unsigned int pkgconf_pkg_libs(pkgconf_client_t *client, pkgconf_pkg_t *root, pkgconf_list_t *list, int maxdepth); 
+PKGCONF_API pkgconf_pkg_comparator_t pkgconf_pkg_comparator_lookup_by_name(const char *name); +PKGCONF_API pkgconf_pkg_t *pkgconf_builtin_pkg_get(const char *name); + +PKGCONF_API int pkgconf_compare_version(const char *a, const char *b); +PKGCONF_API pkgconf_pkg_t *pkgconf_scan_all(pkgconf_client_t *client, void *ptr, pkgconf_pkg_iteration_func_t func); + +/* parse.c */ +PKGCONF_API pkgconf_pkg_t *pkgconf_pkg_new_from_file(pkgconf_client_t *client, const char *path, FILE *f, unsigned int flags); +PKGCONF_API void pkgconf_dependency_parse_str(pkgconf_client_t *client, pkgconf_list_t *deplist_head, const char *depends, unsigned int flags); +PKGCONF_API void pkgconf_dependency_parse(pkgconf_client_t *client, pkgconf_pkg_t *pkg, pkgconf_list_t *deplist_head, const char *depends, unsigned int flags); +PKGCONF_API void pkgconf_dependency_append(pkgconf_list_t *list, pkgconf_dependency_t *tail); +PKGCONF_API void pkgconf_dependency_free(pkgconf_list_t *list); +PKGCONF_API void pkgconf_dependency_free_one(pkgconf_dependency_t *dep); +PKGCONF_API pkgconf_dependency_t *pkgconf_dependency_add(pkgconf_client_t *client, pkgconf_list_t *list, const char *package, const char *version, pkgconf_pkg_comparator_t compare, unsigned int flags); +PKGCONF_API pkgconf_dependency_t *pkgconf_dependency_ref(pkgconf_client_t *client, pkgconf_dependency_t *dep); +PKGCONF_API void pkgconf_dependency_unref(pkgconf_client_t *client, pkgconf_dependency_t *dep); +PKGCONF_API pkgconf_dependency_t *pkgconf_dependency_copy(pkgconf_client_t *client, const pkgconf_dependency_t *dep); + +/* argvsplit.c */ +PKGCONF_API int pkgconf_argv_split(const char *src, int *argc, char ***argv); +PKGCONF_API void pkgconf_argv_free(char **argv); + +/* fragment.c */ +typedef struct pkgconf_fragment_render_ops_ { + size_t (*render_len)(const pkgconf_list_t *list, bool escape); + void (*render_buf)(const pkgconf_list_t *list, char *buf, size_t len, bool escape); +} pkgconf_fragment_render_ops_t; + +typedef bool 
(*pkgconf_fragment_filter_func_t)(const pkgconf_client_t *client, const pkgconf_fragment_t *frag, void *data); +PKGCONF_API bool pkgconf_fragment_parse(const pkgconf_client_t *client, pkgconf_list_t *list, pkgconf_list_t *vars, const char *value, unsigned int flags); +PKGCONF_API void pkgconf_fragment_add(const pkgconf_client_t *client, pkgconf_list_t *list, const char *string, unsigned int flags); +PKGCONF_API void pkgconf_fragment_copy(const pkgconf_client_t *client, pkgconf_list_t *list, const pkgconf_fragment_t *base, bool is_private); +PKGCONF_API void pkgconf_fragment_copy_list(const pkgconf_client_t *client, pkgconf_list_t *list, const pkgconf_list_t *base); +PKGCONF_API void pkgconf_fragment_delete(pkgconf_list_t *list, pkgconf_fragment_t *node); +PKGCONF_API void pkgconf_fragment_free(pkgconf_list_t *list); +PKGCONF_API void pkgconf_fragment_filter(const pkgconf_client_t *client, pkgconf_list_t *dest, pkgconf_list_t *src, pkgconf_fragment_filter_func_t filter_func, void *data); +PKGCONF_API size_t pkgconf_fragment_render_len(const pkgconf_list_t *list, bool escape, const pkgconf_fragment_render_ops_t *ops); +PKGCONF_API void pkgconf_fragment_render_buf(const pkgconf_list_t *list, char *buf, size_t len, bool escape, const pkgconf_fragment_render_ops_t *ops); +PKGCONF_API char *pkgconf_fragment_render(const pkgconf_list_t *list, bool escape, const pkgconf_fragment_render_ops_t *ops); +PKGCONF_API bool pkgconf_fragment_has_system_dir(const pkgconf_client_t *client, const pkgconf_fragment_t *frag); + +/* fileio.c */ +PKGCONF_API char *pkgconf_fgetline(char *line, size_t size, FILE *stream); + +/* tuple.c */ +PKGCONF_API pkgconf_tuple_t *pkgconf_tuple_add(const pkgconf_client_t *client, pkgconf_list_t *parent, const char *key, const char *value, bool parse, unsigned int flags); +PKGCONF_API char *pkgconf_tuple_find(const pkgconf_client_t *client, pkgconf_list_t *list, const char *key); +PKGCONF_API char *pkgconf_tuple_parse(const pkgconf_client_t *client, 
pkgconf_list_t *list, const char *value, unsigned int flags); +PKGCONF_API void pkgconf_tuple_free(pkgconf_list_t *list); +PKGCONF_API void pkgconf_tuple_free_entry(pkgconf_tuple_t *tuple, pkgconf_list_t *list); +PKGCONF_API void pkgconf_tuple_add_global(pkgconf_client_t *client, const char *key, const char *value); +PKGCONF_API char *pkgconf_tuple_find_global(const pkgconf_client_t *client, const char *key); +PKGCONF_API void pkgconf_tuple_free_global(pkgconf_client_t *client); +PKGCONF_API void pkgconf_tuple_define_global(pkgconf_client_t *client, const char *kv); + +/* queue.c */ +PKGCONF_API void pkgconf_queue_push(pkgconf_list_t *list, const char *package); +PKGCONF_API bool pkgconf_queue_compile(pkgconf_client_t *client, pkgconf_pkg_t *world, pkgconf_list_t *list); +PKGCONF_API bool pkgconf_queue_solve(pkgconf_client_t *client, pkgconf_list_t *list, pkgconf_pkg_t *world, int maxdepth); +PKGCONF_API void pkgconf_queue_free(pkgconf_list_t *list); +PKGCONF_API bool pkgconf_queue_apply(pkgconf_client_t *client, pkgconf_list_t *list, pkgconf_queue_apply_func_t func, int maxdepth, void *data); +PKGCONF_API bool pkgconf_queue_validate(pkgconf_client_t *client, pkgconf_list_t *list, int maxdepth); +PKGCONF_API void pkgconf_solution_free(pkgconf_client_t *client, pkgconf_pkg_t *world); + +/* cache.c */ +PKGCONF_API pkgconf_pkg_t *pkgconf_cache_lookup(pkgconf_client_t *client, const char *id); +PKGCONF_API void pkgconf_cache_add(pkgconf_client_t *client, pkgconf_pkg_t *pkg); +PKGCONF_API void pkgconf_cache_remove(pkgconf_client_t *client, pkgconf_pkg_t *pkg); +PKGCONF_API void pkgconf_cache_free(pkgconf_client_t *client); + +/* audit.c */ +PKGCONF_API void pkgconf_audit_set_log(pkgconf_client_t *client, FILE *auditf); +PKGCONF_API void pkgconf_audit_log(pkgconf_client_t *client, const char *format, ...) 
PRINTFLIKE(2, 3); +PKGCONF_API void pkgconf_audit_log_dependency(pkgconf_client_t *client, const pkgconf_pkg_t *dep, const pkgconf_dependency_t *depnode); + +/* path.c */ +PKGCONF_API void pkgconf_path_add(const char *text, pkgconf_list_t *dirlist, bool filter); +PKGCONF_API size_t pkgconf_path_split(const char *text, pkgconf_list_t *dirlist, bool filter); +PKGCONF_API size_t pkgconf_path_build_from_environ(const char *envvarname, const char *fallback, pkgconf_list_t *dirlist, bool filter); +PKGCONF_API bool pkgconf_path_match_list(const char *path, const pkgconf_list_t *dirlist); +PKGCONF_API void pkgconf_path_free(pkgconf_list_t *dirlist); +PKGCONF_API bool pkgconf_path_relocate(char *buf, size_t buflen); +PKGCONF_API void pkgconf_path_copy_list(pkgconf_list_t *dst, const pkgconf_list_t *src); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/pkgconf/libpkgconf/stdinc.h b/include/pkgconf/libpkgconf/stdinc.h new file mode 100644 index 0000000..b38870b --- /dev/null +++ b/include/pkgconf/libpkgconf/stdinc.h @@ -0,0 +1,70 @@ +/* + * stdinc.h + * pull in standard headers (including portability hacks) + * + * Copyright (c) 2012 pkgconf authors (see AUTHORS). + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * This software is provided 'as is' and without any warranty, express or + * implied. In no event shall the authors be liable for any damages arising + * from the use of this software. 
+ */ + +#ifndef LIBPKGCONF_STDINC_H +#define LIBPKGCONF_STDINC_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +# define WIN32_LEAN_AND_MEAN +# include +# include +# define PATH_DEV_NULL "nul" +# ifdef _WIN64 +# define SIZE_FMT_SPECIFIER "%I64u" +# else +# define SIZE_FMT_SPECIFIER "%u" +# endif +# ifndef ssize_t +# ifndef __MINGW32__ +# include +# else +# include +# endif +# define ssize_t SSIZE_T +# endif +# ifndef __MINGW32__ +# include "win-dirent.h" +# else +# include +# endif +# define PKGCONF_ITEM_SIZE (_MAX_PATH + 1024) +#else +# define PATH_DEV_NULL "/dev/null" +# define SIZE_FMT_SPECIFIER "%zu" +# ifdef __HAIKU__ +# include +# endif +# include +# include +# include +# include +# ifdef PATH_MAX +# define PKGCONF_ITEM_SIZE (PATH_MAX + 1024) +# else +# define PKGCONF_ITEM_SIZE (4096 + 1024) +# endif +#endif + +#endif diff --git a/lib/bfd-plugins/libdep.so b/lib/bfd-plugins/libdep.so new file mode 100755 index 0000000..0f481e4 Binary files /dev/null and b/lib/bfd-plugins/libdep.so differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtbegin.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtbegin.o new file mode 100644 index 0000000..b8f7408 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtbegin.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtbeginS.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtbeginS.o new file mode 100644 index 0000000..d72ffb8 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtbeginS.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtbeginT.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtbeginT.o new file mode 100644 index 0000000..b8f7408 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtbeginT.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtend.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtend.o new file mode 100644 index 0000000..714cff0 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtend.o differ diff --git 
a/lib/gcc/x86_64-linux-musl/12.2.0/crtendS.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtendS.o new file mode 100644 index 0000000..714cff0 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtendS.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtfastmath.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtfastmath.o new file mode 100644 index 0000000..2752d0d Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtfastmath.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtprec32.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtprec32.o new file mode 100644 index 0000000..b8ee830 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtprec32.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtprec64.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtprec64.o new file mode 100644 index 0000000..fc420a2 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtprec64.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/crtprec80.o b/lib/gcc/x86_64-linux-musl/12.2.0/crtprec80.o new file mode 100644 index 0000000..ad3861a Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/crtprec80.o differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/README b/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/README new file mode 100644 index 0000000..7086a77 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/README @@ -0,0 +1,14 @@ +This README file is copied into the directory for GCC-only header files +when fixincludes is run by the makefile for GCC. + +Many of the files in this directory were automatically edited from the +standard system header files by the fixincludes process. They are +system-specific, and will not work on any other kind of system. They +are also not part of GCC. The reason we have to do this is because +GCC requires ANSI C headers and many vendors supply ANSI-incompatible +headers. + +Because this is an automated process, sometimes headers get "fixed" +that do not, strictly speaking, need a fix. 
As long as nothing is broken +by the process, it is just an unfortunate collateral inconvenience. +We would like to rectify it, if it is not "too inconvenient". diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/limits.h b/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/limits.h new file mode 100644 index 0000000..9390f01 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/limits.h @@ -0,0 +1,206 @@ +/* Copyright (C) 1992-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* This administrivia gets added to the beginning of limits.h + if the system has its own version of limits.h. */ + +/* We use _GCC_LIMITS_H_ because we want this not to match + any macros that the system's limits.h uses for its own purposes. */ +#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */ +#define _GCC_LIMITS_H_ + +#ifndef _LIBC_LIMITS_H_ +/* Use "..." so that we find syslimits.h only in this same directory. */ +#include "syslimits.h" +#endif +/* Copyright (C) 1991-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef _LIMITS_H___ +#define _LIMITS_H___ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT __CHAR_BIT__ + +/* Maximum length of a multibyte character. */ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-SCHAR_MAX - 1) +#undef SCHAR_MAX +#define SCHAR_MAX __SCHAR_MAX__ + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#if __SCHAR_MAX__ == __INT_MAX__ +# define UCHAR_MAX (SCHAR_MAX * 2U + 1U) +#else +# define UCHAR_MAX (SCHAR_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +# undef CHAR_MIN +# if __SCHAR_MAX__ == __INT_MAX__ +# define CHAR_MIN 0U +# else +# define CHAR_MIN 0 +# endif +# undef CHAR_MAX +# define CHAR_MAX UCHAR_MAX +#else +# undef CHAR_MIN +# define CHAR_MIN SCHAR_MIN +# undef CHAR_MAX +# define CHAR_MAX SCHAR_MAX +#endif + +/* Minimum and maximum values a `signed short int' can hold. 
*/ +#undef SHRT_MIN +#define SHRT_MIN (-SHRT_MAX - 1) +#undef SHRT_MAX +#define SHRT_MAX __SHRT_MAX__ + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#if __SHRT_MAX__ == __INT_MAX__ +# define USHRT_MAX (SHRT_MAX * 2U + 1U) +#else +# define USHRT_MAX (SHRT_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `signed int' can hold. */ +#undef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1U) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX - 1L) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1UL) + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# undef LLONG_MAX +# define LLONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULLONG_MAX +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LONG_LONG_MIN +# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL) +# undef LONG_LONG_MAX +# define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULONG_LONG_MAX +# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL) +#endif + +#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \ + || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L)) +/* TS 18661-1 / C2X widths of integer types. 
*/ +# undef CHAR_WIDTH +# define CHAR_WIDTH __SCHAR_WIDTH__ +# undef SCHAR_WIDTH +# define SCHAR_WIDTH __SCHAR_WIDTH__ +# undef UCHAR_WIDTH +# define UCHAR_WIDTH __SCHAR_WIDTH__ +# undef SHRT_WIDTH +# define SHRT_WIDTH __SHRT_WIDTH__ +# undef USHRT_WIDTH +# define USHRT_WIDTH __SHRT_WIDTH__ +# undef INT_WIDTH +# define INT_WIDTH __INT_WIDTH__ +# undef UINT_WIDTH +# define UINT_WIDTH __INT_WIDTH__ +# undef LONG_WIDTH +# define LONG_WIDTH __LONG_WIDTH__ +# undef ULONG_WIDTH +# define ULONG_WIDTH __LONG_WIDTH__ +# undef LLONG_WIDTH +# define LLONG_WIDTH __LONG_LONG_WIDTH__ +# undef ULLONG_WIDTH +# define ULLONG_WIDTH __LONG_LONG_WIDTH__ +#endif + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L +/* C2X width and limit of _Bool. */ +# undef BOOL_MAX +# define BOOL_MAX 1 +# undef BOOL_WIDTH +# define BOOL_WIDTH 1 +#endif + +#endif /* _LIMITS_H___ */ +/* This administrivia gets added to the end of limits.h + if the system has its own version of limits.h. */ + +#else /* not _GCC_LIMITS_H_ */ + +#ifdef _GCC_NEXT_LIMITS_H +#include_next /* recurse down to the real one */ +#endif + +#endif /* not _GCC_LIMITS_H_ */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/syslimits.h b/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/syslimits.h new file mode 100644 index 0000000..a362802 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include-fixed/syslimits.h @@ -0,0 +1,8 @@ +/* syslimits.h stands for the system's own limits.h file. + If we can use it ok unmodified, then we install this text. + If fixincludes fixes it, then the fixed version is installed + instead of this text. 
*/ + +#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */ +#include_next +#undef _GCC_NEXT_LIMITS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/adxintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/adxintrin.h new file mode 100644 index 0000000..23590e5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/adxintrin.h @@ -0,0 +1,81 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _ADXINTRIN_H_INCLUDED +#define _ADXINTRIN_H_INCLUDED + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_subborrow_u32 (unsigned char __CF, unsigned int __X, + unsigned int __Y, unsigned int *__P) +{ + return __builtin_ia32_sbb_u32 (__CF, __X, __Y, __P); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_addcarry_u32 (unsigned char __CF, unsigned int __X, + unsigned int __Y, unsigned int *__P) +{ + return __builtin_ia32_addcarryx_u32 (__CF, __X, __Y, __P); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_addcarryx_u32 (unsigned char __CF, unsigned int __X, + unsigned int __Y, unsigned int *__P) +{ + return __builtin_ia32_addcarryx_u32 (__CF, __X, __Y, __P); +} + +#ifdef __x86_64__ +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_subborrow_u64 (unsigned char __CF, unsigned long long __X, + unsigned long long __Y, unsigned long long *__P) +{ + return __builtin_ia32_sbb_u64 (__CF, __X, __Y, __P); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_addcarry_u64 (unsigned char __CF, unsigned long long __X, + unsigned long long __Y, unsigned long long *__P) +{ + return __builtin_ia32_addcarryx_u64 (__CF, __X, __Y, __P); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_addcarryx_u64 (unsigned char __CF, unsigned long long __X, + unsigned long long __Y, unsigned long long *__P) +{ + return __builtin_ia32_addcarryx_u64 (__CF, __X, __Y, __P); +} +#endif + +#endif /* _ADXINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/ammintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/ammintrin.h new file mode 100644 index 0000000..86e4c4c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/ammintrin.h @@ -0,0 
+1,93 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the AMD Programmers + Manual Update, version 2.x */ + +#ifndef _AMMINTRIN_H_INCLUDED +#define _AMMINTRIN_H_INCLUDED + +/* We need definitions from the SSE3, SSE2 and SSE header files*/ +#include + +#ifndef __SSE4A__ +#pragma GCC push_options +#pragma GCC target("sse4a") +#define __DISABLE_SSE4A__ +#endif /* __SSE4A__ */ + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_sd (double * __P, __m128d __Y) +{ + __builtin_ia32_movntsd (__P, (__v2df) __Y); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_ss (float * __P, __m128 __Y) +{ + __builtin_ia32_movntss (__P, (__v4sf) __Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_si64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_extrq ((__v2di) __X, (__v16qi) __Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extracti_si64 (__m128i __X, unsigned const int __I, unsigned const int __L) +{ + return (__m128i) __builtin_ia32_extrqi ((__v2di) __X, __I, __L); +} +#else +#define _mm_extracti_si64(X, I, L) \ + ((__m128i) __builtin_ia32_extrqi ((__v2di)(__m128i)(X), \ + (unsigned int)(I), (unsigned int)(L))) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_si64 (__m128i __X,__m128i __Y) +{ + return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_inserti_si64(__m128i __X, __m128i __Y, unsigned const int __I, unsigned const int __L) +{ + return (__m128i) __builtin_ia32_insertqi ((__v2di)__X, (__v2di)__Y, __I, __L); +} +#else +#define _mm_inserti_si64(X, Y, I, L) \ + ((__m128i) __builtin_ia32_insertqi ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), \ + (unsigned int)(I), (unsigned int)(L))) +#endif + +#ifdef __DISABLE_SSE4A__ +#undef __DISABLE_SSE4A__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE4A__ */ + +#endif /* _AMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/amxbf16intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/amxbf16intrin.h new file mode 100644 index 0000000..6b49870 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/amxbf16intrin.h @@ -0,0 +1,52 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AMXBF16INTRIN_H_INCLUDED +#define _AMXBF16INTRIN_H_INCLUDED + +#if !defined(__AMX_BF16__) +#pragma GCC push_options +#pragma GCC target("amx-bf16") +#define __DISABLE_AMX_BF16__ +#endif /* __AMX_BF16__ */ + +#if defined(__x86_64__) +#define _tile_dpbf16ps_internal(dst,src1,src2) \ + __asm__ volatile\ + ("{tdpbf16ps\t%%tmm"#src2", %%tmm"#src1", %%tmm"#dst"|tdpbf16ps\t%%tmm"#dst", %%tmm"#src1", %%tmm"#src2"}" ::) + +#define _tile_dpbf16ps(dst,src1,src2) \ + _tile_dpbf16ps_internal (dst, src1, src2) + +#endif + +#ifdef __DISABLE_AMX_BF16__ +#undef __DISABLE_AMX_BF16__ +#pragma GCC pop_options +#endif /* __DISABLE_AMX_BF16__ */ + +#endif /* _AMXBF16INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/amxint8intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/amxint8intrin.h new file mode 100644 index 0000000..2f271df --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/amxint8intrin.h @@ -0,0 +1,61 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AMXINT8INTRIN_H_INCLUDED +#define _AMXINT8INTRIN_H_INCLUDED + +#if !defined(__AMX_INT8__) +#pragma GCC push_options +#pragma GCC target("amx-int8") +#define __DISABLE_AMX_INT8__ +#endif /* __AMX_INT8__ */ + +#if defined(__x86_64__) +#define _tile_int8_dp_internal(name,dst,src1,src2) \ + __asm__ volatile \ + ("{"#name"\t%%tmm"#src2", %%tmm"#src1", %%tmm"#dst"|"#name"\t%%tmm"#dst", %%tmm"#src1", %%tmm"#src2"}" ::) + +#define _tile_dpbssd(dst,src1,src2) \ + _tile_int8_dp_internal (tdpbssd, dst, src1, src2) + +#define _tile_dpbsud(dst,src1,src2) \ + _tile_int8_dp_internal (tdpbsud, dst, src1, src2) + +#define _tile_dpbusd(dst,src1,src2) \ + _tile_int8_dp_internal (tdpbusd, dst, src1, src2) + +#define _tile_dpbuud(dst,src1,src2) \ + _tile_int8_dp_internal (tdpbuud, dst, src1, src2) + +#endif + +#ifdef __DISABLE_AMX_INT8__ +#undef __DISABLE_AMX_INT8__ +#pragma GCC pop_options +#endif /* __DISABLE_AMX_INT8__ */ + +#endif /* _AMXINT8INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/amxtileintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/amxtileintrin.h new file mode 100644 index 0000000..7b5a39e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/amxtileintrin.h @@ -0,0 +1,98 @@ +/* Copyright (C) 
2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AMXTILEINTRIN_H_INCLUDED +#define _AMXTILEINTRIN_H_INCLUDED + +#if !defined(__AMX_TILE__) +#pragma GCC push_options +#pragma GCC target("amx-tile") +#define __DISABLE_AMX_TILE__ +#endif /* __AMX_TILE__ */ + +#if defined(__x86_64__) +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tile_loadconfig (const void *__config) +{ + __asm__ volatile ("ldtilecfg\t%X0" :: "m" (*((const void **)__config))); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tile_storeconfig (void *__config) +{ + __asm__ volatile ("sttilecfg\t%X0" : "=m" (*((void **)__config))); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tile_release (void) +{ + __asm__ volatile ("tilerelease" ::); +} + +#define _tile_loadd(dst,base,stride) \ + _tile_loadd_internal (dst, base, stride) + +#define _tile_loadd_internal(dst,base,stride) \ + __asm__ volatile \ + ("{tileloadd\t(%0,%1,1), %%tmm"#dst"|tileloadd\t%%tmm"#dst", [%0+%1*1]}" \ + :: "r" ((const void*) (base)), "r" ((long) (stride))) + +#define _tile_stream_loadd(dst,base,stride) \ + _tile_stream_loadd_internal (dst, base, stride) + +#define _tile_stream_loadd_internal(dst,base,stride) \ + __asm__ volatile \ + ("{tileloaddt1\t(%0,%1,1), %%tmm"#dst"|tileloaddt1\t%%tmm"#dst", [%0+%1*1]}" \ + :: "r" ((const void*) (base)), "r" ((long) (stride))) + +#define _tile_stored(dst,base,stride) \ + _tile_stored_internal (dst, base, stride) + +#define _tile_stored_internal(src,base,stride) \ + __asm__ volatile \ + ("{tilestored\t%%tmm"#src", (%0,%1,1)|tilestored\t[%0+%1*1], %%tmm"#src"}" \ + :: "r" ((void*) (base)), "r" ((long) (stride)) \ + : "memory") + +#define _tile_zero(dst) \ + _tile_zero_internal (dst) + +#define _tile_zero_internal(dst) \ + __asm__ volatile \ + ("tilezero\t%%tmm"#dst ::) + +#endif + +#ifdef __DISABLE_AMX_TILE__ +#undef __DISABLE_AMX_TILE__ +#pragma GCC pop_options +#endif /* 
__DISABLE_AMX_TILE__ */ + +#endif /* _AMXTILEINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx2intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx2intrin.h new file mode 100644 index 0000000..e376448 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx2intrin.h @@ -0,0 +1,1923 @@ +/* Copyright (C) 2011-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _AVX2INTRIN_H_INCLUDED +#define _AVX2INTRIN_H_INCLUDED + +#ifndef __AVX2__ +#pragma GCC push_options +#pragma GCC target("avx2") +#define __DISABLE_AVX2__ +#endif /* __AVX2__ */ + +/* Sum absolute 8-bit integer difference of adjacent groups of 4 + byte integers in the first 2 operands. Starting offsets within + operands are determined by the 3rd mask operand. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mpsadbw_epu8 (__m256i __X, __m256i __Y, const int __M) +{ + return (__m256i) __builtin_ia32_mpsadbw256 ((__v32qi)__X, + (__v32qi)__Y, __M); +} +#else +#define _mm256_mpsadbw_epu8(X, Y, M) \ + ((__m256i) __builtin_ia32_mpsadbw256 ((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(M))) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_abs_epi8 (__m256i __A) +{ + return (__m256i)__builtin_ia32_pabsb256 ((__v32qi)__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_abs_epi16 (__m256i __A) +{ + return (__m256i)__builtin_ia32_pabsw256 ((__v16hi)__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_abs_epi32 (__m256i __A) +{ + return (__m256i)__builtin_ia32_pabsd256 ((__v8si)__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_packs_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_packssdw256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_packs_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_packsswb256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_packus_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_packusdw256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_packus_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_packuswb256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_add_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v32qu)__A + (__v32qu)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v16hu)__A + (__v16hu)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8su)__A + (__v8su)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du)__A + (__v4du)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_adds_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_paddsb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_adds_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_paddsw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_adds_epu8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_paddusb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_adds_epu16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_paddusw256 ((__v16hi)__A, (__v16hi)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_alignr_epi8 (__m256i __A, __m256i __B, const int __N) +{ + return (__m256i) __builtin_ia32_palignr256 ((__v4di)__A, + (__v4di)__B, + __N * 8); +} +#else +/* In that case (__N*8) will be in vreg, and insn will not be matched. 
*/ +/* Use define instead */ +#define _mm256_alignr_epi8(A, B, N) \ + ((__m256i) __builtin_ia32_palignr256 ((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (int)(N) * 8)) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_and_si256 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du)__A & (__v4du)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_andnot_si256 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_andnotsi256 ((__v4di)__A, (__v4di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_avg_epu8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pavgb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_avg_epu16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pavgw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blendv_epi8 (__m256i __X, __m256i __Y, __m256i __M) +{ + return (__m256i) __builtin_ia32_pblendvb256 ((__v32qi)__X, + (__v32qi)__Y, + (__v32qi)__M); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blend_epi16 (__m256i __X, __m256i __Y, const int __M) +{ + return (__m256i) __builtin_ia32_pblendw256 ((__v16hi)__X, + (__v16hi)__Y, + __M); +} +#else +#define _mm256_blend_epi16(X, Y, M) \ + ((__m256i) __builtin_ia32_pblendw256 ((__v16hi)(__m256i)(X), \ + (__v16hi)(__m256i)(Y), (int)(M))) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v32qi)__A == (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_cmpeq_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v16hi)__A == (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8si)__A == (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4di)__A == (__v4di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v32qs)__A > (__v32qs)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v16hi)__A > (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8si)__A > (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4di)__A > (__v4di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadd_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_phaddw256 ((__v16hi)__X, + (__v16hi)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadd_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_phaddd256 ((__v8si)__X, (__v8si)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadds_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_phaddsw256 ((__v16hi)__X, + (__v16hi)__Y); +} + +extern __inline 
__m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsub_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_phsubw256 ((__v16hi)__X, + (__v16hi)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsub_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_phsubd256 ((__v8si)__X, (__v8si)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsubs_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_phsubsw256 ((__v16hi)__X, + (__v16hi)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maddubs_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmaddubsw256 ((__v32qi)__X, + (__v32qi)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_madd_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaddwd256 ((__v16hi)__A, + (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaxsb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaxsw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaxsd256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epu8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaxub256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline 
__m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epu16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaxuw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epu32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmaxud256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pminsb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pminsw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pminsd256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epu8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pminub256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epu16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epu32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pminud256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movemask_epi8 (__m256i __A) +{ + return __builtin_ia32_pmovmskb256 ((__v32qi)__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_cvtepi8_epi16 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxbw256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi8_epi32 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxbd256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi8_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxbq256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi16_epi32 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxwd256 ((__v8hi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi16_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxwq256 ((__v8hi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxdq256 ((__v4si)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu8_epi16 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxbw256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu8_epi32 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxbd256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu8_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxbq256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu16_epi32 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxwd256 ((__v8hi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_cvtepu16_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxwq256 ((__v8hi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu32_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxdq256 ((__v4si)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuldq256 ((__v8si)__X, (__v8si)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mulhrs_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmulhrsw256 ((__v16hi)__X, + (__v16hi)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mulhi_epu16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmulhuw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mulhi_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmulhw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mullo_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v16hu)__A * (__v16hu)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mullo_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8su)__A * (__v8su)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_epu32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_pmuludq256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_si256 (__m256i __A, __m256i __B) +{ + return (__m256i) 
((__v4du)__A | (__v4du)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sad_epu8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psadbw256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_epi8 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pshufb256 ((__v32qi)__X, + (__v32qi)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_epi32 (__m256i __A, const int __mask) +{ + return (__m256i)__builtin_ia32_pshufd256 ((__v8si)__A, __mask); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shufflehi_epi16 (__m256i __A, const int __mask) +{ + return (__m256i)__builtin_ia32_pshufhw256 ((__v16hi)__A, __mask); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shufflelo_epi16 (__m256i __A, const int __mask) +{ + return (__m256i)__builtin_ia32_pshuflw256 ((__v16hi)__A, __mask); +} +#else +#define _mm256_shuffle_epi32(A, N) \ + ((__m256i)__builtin_ia32_pshufd256 ((__v8si)(__m256i)(A), (int)(N))) +#define _mm256_shufflehi_epi16(A, N) \ + ((__m256i)__builtin_ia32_pshufhw256 ((__v16hi)(__m256i)(A), (int)(N))) +#define _mm256_shufflelo_epi16(A, N) \ + ((__m256i)__builtin_ia32_pshuflw256 ((__v16hi)(__m256i)(A), (int)(N))) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sign_epi8 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psignb256 ((__v32qi)__X, (__v32qi)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sign_epi16 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psignw256 ((__v16hi)__X, (__v16hi)__Y); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sign_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psignd256 ((__v8si)__X, (__v8si)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_bslli_epi128 (__m256i __A, const int __N) +{ + return (__m256i)__builtin_ia32_pslldqi256 (__A, __N * 8); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_slli_si256 (__m256i __A, const int __N) +{ + return (__m256i)__builtin_ia32_pslldqi256 (__A, __N * 8); +} +#else +#define _mm256_bslli_epi128(A, N) \ + ((__m256i)__builtin_ia32_pslldqi256 ((__m256i)(A), (int)(N) * 8)) +#define _mm256_slli_si256(A, N) \ + ((__m256i)__builtin_ia32_pslldqi256 ((__m256i)(A), (int)(N) * 8)) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_slli_epi16 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psllwi256 ((__v16hi)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sll_epi16 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psllw256((__v16hi)__A, (__v8hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_slli_epi32 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_pslldi256 ((__v8si)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sll_epi32 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_pslld256((__v8si)__A, (__v4si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_slli_epi64 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psllqi256 ((__v4di)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_sll_epi64 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psllq256((__v4di)__A, (__v2di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srai_epi16 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psrawi256 ((__v16hi)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sra_epi16 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psraw256 ((__v16hi)__A, (__v8hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srai_epi32 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psradi256 ((__v8si)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sra_epi32 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psrad256 ((__v8si)__A, (__v4si)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_bsrli_epi128 (__m256i __A, const int __N) +{ + return (__m256i)__builtin_ia32_psrldqi256 (__A, __N * 8); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srli_si256 (__m256i __A, const int __N) +{ + return (__m256i)__builtin_ia32_psrldqi256 (__A, __N * 8); +} +#else +#define _mm256_bsrli_epi128(A, N) \ + ((__m256i)__builtin_ia32_psrldqi256 ((__m256i)(A), (int)(N) * 8)) +#define _mm256_srli_si256(A, N) \ + ((__m256i)__builtin_ia32_psrldqi256 ((__m256i)(A), (int)(N) * 8)) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srli_epi16 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psrlwi256 ((__v16hi)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srl_epi16 (__m256i __A, __m128i 
__B) +{ + return (__m256i)__builtin_ia32_psrlw256((__v16hi)__A, (__v8hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srli_epi32 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psrldi256 ((__v8si)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srl_epi32 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psrld256((__v8si)__A, (__v4si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srli_epi64 (__m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_psrlqi256 ((__v4di)__A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srl_epi64 (__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psrlq256((__v4di)__A, (__v2di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v32qu)__A - (__v32qu)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v16hu)__A - (__v16hu)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8su)__A - (__v8su)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du)__A - (__v4du)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_subs_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psubsb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_subs_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psubsw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_subs_epu8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psubusb256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_subs_epu16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psubusw256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpckhbw256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpckhwd256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpckhdq256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpckhqdq256 ((__v4di)__A, (__v4di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpcklbw256 ((__v32qi)__A, (__v32qi)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpcklwd256 ((__v16hi)__A, (__v16hi)__B); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpckldq256 ((__v8si)__A, (__v8si)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_punpcklqdq256 ((__v4di)__A, (__v4di)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_si256 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du)__A ^ (__v4du)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_load_si256 (__m256i const *__X) +{ + return (__m256i) __builtin_ia32_movntdqa256 ((__v4di *) __X); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastss_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_vbroadcastss_ps ((__v4sf)__X); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastss_ps (__m128 __X) +{ + return (__m256) __builtin_ia32_vbroadcastss_ps256 ((__v4sf)__X); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastsd_pd (__m128d __X) +{ + return (__m256d) __builtin_ia32_vbroadcastsd_pd256 ((__v2df)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastsi128_si256 (__m128i __X) +{ + return (__m256i) __builtin_ia32_vbroadcastsi256 ((__v2di)__X); +} + +#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X) +#define _mm_broadcastsd_pd(X) _mm_movedup_pd(X) + +#ifdef __OPTIMIZE__ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_epi32 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pblendd128 
((__v4si)__X, + (__v4si)__Y, + __M); +} +#else +#define _mm_blend_epi32(X, Y, M) \ + ((__m128i) __builtin_ia32_pblendd128 ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(M))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blend_epi32 (__m256i __X, __m256i __Y, const int __M) +{ + return (__m256i) __builtin_ia32_pblendd256 ((__v8si)__X, + (__v8si)__Y, + __M); +} +#else +#define _mm256_blend_epi32(X, Y, M) \ + ((__m256i) __builtin_ia32_pblendd256 ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(M))) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastb_epi8 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pbroadcastb256 ((__v16qi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastw_epi16 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pbroadcastw256 ((__v8hi)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastd_epi32 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pbroadcastd256 ((__v4si)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastq_epi64 (__m128i __X) +{ + return (__m256i) __builtin_ia32_pbroadcastq256 ((__v2di)__X); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastb_epi8 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pbroadcastb128 ((__v16qi)__X); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastw_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pbroadcastw128 ((__v8hi)__X); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastd_epi32 (__m128i __X) +{ + return (__m128i) 
__builtin_ia32_pbroadcastd128 ((__v4si)__X); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastq_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pbroadcastq128 ((__v2di)__X); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutevar8x32_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256 ((__v8si)__X, (__v8si)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute4x64_pd (__m256d __X, const int __M) +{ + return (__m256d) __builtin_ia32_permdf256 ((__v4df)__X, __M); +} +#else +#define _mm256_permute4x64_pd(X, M) \ + ((__m256d) __builtin_ia32_permdf256 ((__v4df)(__m256d)(X), (int)(M))) +#endif + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutevar8x32_ps (__m256 __X, __m256i __Y) +{ + return (__m256) __builtin_ia32_permvarsf256 ((__v8sf)__X, (__v8si)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute4x64_epi64 (__m256i __X, const int __M) +{ + return (__m256i) __builtin_ia32_permdi256 ((__v4di)__X, __M); +} +#else +#define _mm256_permute4x64_epi64(X, M) \ + ((__m256i) __builtin_ia32_permdi256 ((__v4di)(__m256i)(X), (int)(M))) +#endif + + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2x128_si256 (__m256i __X, __m256i __Y, const int __M) +{ + return (__m256i) __builtin_ia32_permti256 ((__v4di)__X, (__v4di)__Y, __M); +} +#else +#define _mm256_permute2x128_si256(X, Y, M) \ + ((__m256i) __builtin_ia32_permti256 ((__v4di)(__m256i)(X), (__v4di)(__m256i)(Y), (int)(M))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_extracti128_si256 (__m256i __X, const int __M) +{ + return (__m128i) __builtin_ia32_extract128i256 ((__v4di)__X, __M); +} +#else +#define _mm256_extracti128_si256(X, M) \ + ((__m128i) __builtin_ia32_extract128i256 ((__v4di)(__m256i)(X), (int)(M))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_inserti128_si256 (__m256i __X, __m128i __Y, const int __M) +{ + return (__m256i) __builtin_ia32_insert128i256 ((__v4di)__X, (__v2di)__Y, __M); +} +#else +#define _mm256_inserti128_si256(X, Y, M) \ + ((__m256i) __builtin_ia32_insert128i256 ((__v4di)(__m256i)(X), \ + (__v2di)(__m128i)(Y), \ + (int)(M))) +#endif + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskload_epi32 (int const *__X, __m256i __M ) +{ + return (__m256i) __builtin_ia32_maskloadd256 ((const __v8si *)__X, + (__v8si)__M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskload_epi64 (long long const *__X, __m256i __M ) +{ + return (__m256i) __builtin_ia32_maskloadq256 ((const __v4di *)__X, + (__v4di)__M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskload_epi32 (int const *__X, __m128i __M ) +{ + return (__m128i) __builtin_ia32_maskloadd ((const __v4si *)__X, + (__v4si)__M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskload_epi64 (long long const *__X, __m128i __M ) +{ + return (__m128i) __builtin_ia32_maskloadq ((const __v2di *)__X, + (__v2di)__M); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskstore_epi32 (int *__X, __m256i __M, __m256i __Y ) +{ + __builtin_ia32_maskstored256 ((__v8si *)__X, (__v8si)__M, (__v8si)__Y); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_maskstore_epi64 (long long *__X, __m256i __M, __m256i __Y ) +{ + __builtin_ia32_maskstoreq256 ((__v4di *)__X, (__v4di)__M, (__v4di)__Y); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskstore_epi32 (int *__X, __m128i __M, __m128i __Y ) +{ + __builtin_ia32_maskstored ((__v4si *)__X, (__v4si)__M, (__v4si)__Y); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskstore_epi64 (long long *__X, __m128i __M, __m128i __Y ) +{ + __builtin_ia32_maskstoreq (( __v2di *)__X, (__v2di)__M, (__v2di)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sllv_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psllv8si ((__v8si)__X, (__v8si)__Y); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sllv_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psllv4si ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sllv_epi64 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psllv4di ((__v4di)__X, (__v4di)__Y); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sllv_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psllv2di ((__v2di)__X, (__v2di)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srav_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrav8si ((__v8si)__X, (__v8si)__Y); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srav_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrav4si ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_srlv_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrlv8si ((__v8si)__X, (__v8si)__Y); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srlv_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrlv4si ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srlv_epi64 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrlv4di ((__v4di)__X, (__v4di)__Y); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srlv_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrlv2di ((__v2di)__X, (__v2di)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32gather_pd (double const *__base, __m128i __index, const int __scale) +{ + __v2df __zero = _mm_setzero_pd (); + __v2df __mask = _mm_cmpeq_pd (__zero, __zero); + + return (__m128d) __builtin_ia32_gathersiv2df (_mm_undefined_pd (), + __base, + (__v4si)__index, + __mask, + __scale); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32gather_pd (__m128d __src, double const *__base, __m128i __index, + __m128d __mask, const int __scale) +{ + return (__m128d) __builtin_ia32_gathersiv2df ((__v2df)__src, + __base, + (__v4si)__index, + (__v2df)__mask, + __scale); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32gather_pd (double const *__base, __m128i __index, const int __scale) +{ + __v4df __zero = _mm256_setzero_pd (); + __v4df __mask = _mm256_cmp_pd (__zero, __zero, _CMP_EQ_OQ); + + return (__m256d) __builtin_ia32_gathersiv4df (_mm256_undefined_pd (), + __base, + (__v4si)__index, + __mask, + __scale); +} + +extern __inline __m256d +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32gather_pd (__m256d __src, double const *__base, + __m128i __index, __m256d __mask, const int __scale) +{ + return (__m256d) __builtin_ia32_gathersiv4df ((__v4df)__src, + __base, + (__v4si)__index, + (__v4df)__mask, + __scale); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64gather_pd (double const *__base, __m128i __index, const int __scale) +{ + __v2df __src = _mm_setzero_pd (); + __v2df __mask = _mm_cmpeq_pd (__src, __src); + + return (__m128d) __builtin_ia32_gatherdiv2df (__src, + __base, + (__v2di)__index, + __mask, + __scale); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64gather_pd (__m128d __src, double const *__base, __m128i __index, + __m128d __mask, const int __scale) +{ + return (__m128d) __builtin_ia32_gatherdiv2df ((__v2df)__src, + __base, + (__v2di)__index, + (__v2df)__mask, + __scale); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64gather_pd (double const *__base, __m256i __index, const int __scale) +{ + __v4df __src = _mm256_setzero_pd (); + __v4df __mask = _mm256_cmp_pd (__src, __src, _CMP_EQ_OQ); + + return (__m256d) __builtin_ia32_gatherdiv4df (__src, + __base, + (__v4di)__index, + __mask, + __scale); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i64gather_pd (__m256d __src, double const *__base, + __m256i __index, __m256d __mask, const int __scale) +{ + return (__m256d) __builtin_ia32_gatherdiv4df ((__v4df)__src, + __base, + (__v4di)__index, + (__v4df)__mask, + __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32gather_ps (float const *__base, __m128i __index, const int __scale) +{ + __v4sf __src = _mm_setzero_ps (); + __v4sf __mask = _mm_cmpeq_ps (__src, __src); + 
+ return (__m128) __builtin_ia32_gathersiv4sf (__src, + __base, + (__v4si)__index, + __mask, + __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32gather_ps (__m128 __src, float const *__base, __m128i __index, + __m128 __mask, const int __scale) +{ + return (__m128) __builtin_ia32_gathersiv4sf ((__v4sf)__src, + __base, + (__v4si)__index, + (__v4sf)__mask, + __scale); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32gather_ps (float const *__base, __m256i __index, const int __scale) +{ + __v8sf __src = _mm256_setzero_ps (); + __v8sf __mask = _mm256_cmp_ps (__src, __src, _CMP_EQ_OQ); + + return (__m256) __builtin_ia32_gathersiv8sf (__src, + __base, + (__v8si)__index, + __mask, + __scale); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32gather_ps (__m256 __src, float const *__base, + __m256i __index, __m256 __mask, const int __scale) +{ + return (__m256) __builtin_ia32_gathersiv8sf ((__v8sf)__src, + __base, + (__v8si)__index, + (__v8sf)__mask, + __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64gather_ps (float const *__base, __m128i __index, const int __scale) +{ + __v4sf __src = _mm_setzero_ps (); + __v4sf __mask = _mm_cmpeq_ps (__src, __src); + + return (__m128) __builtin_ia32_gatherdiv4sf (__src, + __base, + (__v2di)__index, + __mask, + __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64gather_ps (__m128 __src, float const *__base, __m128i __index, + __m128 __mask, const int __scale) +{ + return (__m128) __builtin_ia32_gatherdiv4sf ((__v4sf)__src, + __base, + (__v2di)__index, + (__v4sf)__mask, + __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64gather_ps (float const 
*__base, __m256i __index, const int __scale) +{ + __v4sf __src = _mm_setzero_ps (); + __v4sf __mask = _mm_cmpeq_ps (__src, __src); + + return (__m128) __builtin_ia32_gatherdiv4sf256 (__src, + __base, + (__v4di)__index, + __mask, + __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i64gather_ps (__m128 __src, float const *__base, + __m256i __index, __m128 __mask, const int __scale) +{ + return (__m128) __builtin_ia32_gatherdiv4sf256 ((__v4sf)__src, + __base, + (__v4di)__index, + (__v4sf)__mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32gather_epi64 (long long int const *__base, + __m128i __index, const int __scale) +{ + __v2di __src = __extension__ (__v2di){ 0, 0 }; + __v2di __mask = __extension__ (__v2di){ ~0, ~0 }; + + return (__m128i) __builtin_ia32_gathersiv2di (__src, + __base, + (__v4si)__index, + __mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32gather_epi64 (__m128i __src, long long int const *__base, + __m128i __index, __m128i __mask, const int __scale) +{ + return (__m128i) __builtin_ia32_gathersiv2di ((__v2di)__src, + __base, + (__v4si)__index, + (__v2di)__mask, + __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32gather_epi64 (long long int const *__base, + __m128i __index, const int __scale) +{ + __v4di __src = __extension__ (__v4di){ 0, 0, 0, 0 }; + __v4di __mask = __extension__ (__v4di){ ~0, ~0, ~0, ~0 }; + + return (__m256i) __builtin_ia32_gathersiv4di (__src, + __base, + (__v4si)__index, + __mask, + __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32gather_epi64 (__m256i __src, long long int const *__base, + __m128i __index, __m256i __mask, + const int __scale) +{ + return (__m256i) 
__builtin_ia32_gathersiv4di ((__v4di)__src, + __base, + (__v4si)__index, + (__v4di)__mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64gather_epi64 (long long int const *__base, + __m128i __index, const int __scale) +{ + __v2di __src = __extension__ (__v2di){ 0, 0 }; + __v2di __mask = __extension__ (__v2di){ ~0, ~0 }; + + return (__m128i) __builtin_ia32_gatherdiv2di (__src, + __base, + (__v2di)__index, + __mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64gather_epi64 (__m128i __src, long long int const *__base, + __m128i __index, __m128i __mask, const int __scale) +{ + return (__m128i) __builtin_ia32_gatherdiv2di ((__v2di)__src, + __base, + (__v2di)__index, + (__v2di)__mask, + __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64gather_epi64 (long long int const *__base, + __m256i __index, const int __scale) +{ + __v4di __src = __extension__ (__v4di){ 0, 0, 0, 0 }; + __v4di __mask = __extension__ (__v4di){ ~0, ~0, ~0, ~0 }; + + return (__m256i) __builtin_ia32_gatherdiv4di (__src, + __base, + (__v4di)__index, + __mask, + __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i64gather_epi64 (__m256i __src, long long int const *__base, + __m256i __index, __m256i __mask, + const int __scale) +{ + return (__m256i) __builtin_ia32_gatherdiv4di ((__v4di)__src, + __base, + (__v4di)__index, + (__v4di)__mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32gather_epi32 (int const *__base, __m128i __index, const int __scale) +{ + __v4si __src = __extension__ (__v4si){ 0, 0, 0, 0 }; + __v4si __mask = __extension__ (__v4si){ ~0, ~0, ~0, ~0 }; + + return (__m128i) __builtin_ia32_gathersiv4si (__src, + __base, + 
(__v4si)__index, + __mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32gather_epi32 (__m128i __src, int const *__base, __m128i __index, + __m128i __mask, const int __scale) +{ + return (__m128i) __builtin_ia32_gathersiv4si ((__v4si)__src, + __base, + (__v4si)__index, + (__v4si)__mask, + __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32gather_epi32 (int const *__base, __m256i __index, const int __scale) +{ + __v8si __src = __extension__ (__v8si){ 0, 0, 0, 0, 0, 0, 0, 0 }; + __v8si __mask = __extension__ (__v8si){ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0 }; + + return (__m256i) __builtin_ia32_gathersiv8si (__src, + __base, + (__v8si)__index, + __mask, + __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32gather_epi32 (__m256i __src, int const *__base, + __m256i __index, __m256i __mask, + const int __scale) +{ + return (__m256i) __builtin_ia32_gathersiv8si ((__v8si)__src, + __base, + (__v8si)__index, + (__v8si)__mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64gather_epi32 (int const *__base, __m128i __index, const int __scale) +{ + __v4si __src = __extension__ (__v4si){ 0, 0, 0, 0 }; + __v4si __mask = __extension__ (__v4si){ ~0, ~0, ~0, ~0 }; + + return (__m128i) __builtin_ia32_gatherdiv4si (__src, + __base, + (__v2di)__index, + __mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64gather_epi32 (__m128i __src, int const *__base, __m128i __index, + __m128i __mask, const int __scale) +{ + return (__m128i) __builtin_ia32_gatherdiv4si ((__v4si)__src, + __base, + (__v2di)__index, + (__v4si)__mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_i64gather_epi32 (int const *__base, __m256i __index, const int __scale) +{ + __v4si __src = __extension__ (__v4si){ 0, 0, 0, 0 }; + __v4si __mask = __extension__ (__v4si){ ~0, ~0, ~0, ~0 }; + + return (__m128i) __builtin_ia32_gatherdiv4si256 (__src, + __base, + (__v4di)__index, + __mask, + __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i64gather_epi32 (__m128i __src, int const *__base, + __m256i __index, __m128i __mask, + const int __scale) +{ + return (__m128i) __builtin_ia32_gatherdiv4si256 ((__v4si)__src, + __base, + (__v4di)__index, + (__v4si)__mask, + __scale); +} +#else /* __OPTIMIZE__ */ +#define _mm_i32gather_pd(BASE, INDEX, SCALE) \ + (__m128d) __builtin_ia32_gathersiv2df ((__v2df) _mm_setzero_pd (), \ + (double const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v2df) \ + _mm_cmpeq_pd (_mm_setzero_pd (),\ + _mm_setzero_pd ()),\ + (int) (SCALE)) + +#define _mm_mask_i32gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128d) __builtin_ia32_gathersiv2df ((__v2df)(__m128d) (SRC), \ + (double const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v2df)(__m128d) (MASK), \ + (int) (SCALE)) + +#define _mm256_i32gather_pd(BASE, INDEX, SCALE) \ + (__m256d) __builtin_ia32_gathersiv4df ((__v4df) _mm256_setzero_pd (), \ + (double const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4df) \ + _mm256_cmp_pd (_mm256_setzero_pd (),\ + _mm256_setzero_pd (),\ + _CMP_EQ_OQ), \ + (int) (SCALE)) + +#define _mm256_mask_i32gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ + (__m256d) __builtin_ia32_gathersiv4df ((__v4df)(__m256d) (SRC), \ + (double const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4df)(__m256d) (MASK), \ + (int) (SCALE)) + +#define _mm_i64gather_pd(BASE, INDEX, SCALE) \ + (__m128d) __builtin_ia32_gatherdiv2df ((__v2df) _mm_setzero_pd (), \ + (double const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + (__v2df) \ + _mm_cmpeq_pd (_mm_setzero_pd (),\ + _mm_setzero_pd ()),\ + (int) 
(SCALE)) + +#define _mm_mask_i64gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128d) __builtin_ia32_gatherdiv2df ((__v2df)(__m128d) (SRC), \ + (double const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + (__v2df)(__m128d) (MASK), \ + (int) (SCALE)) + +#define _mm256_i64gather_pd(BASE, INDEX, SCALE) \ + (__m256d) __builtin_ia32_gatherdiv4df ((__v4df) _mm256_setzero_pd (), \ + (double const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4df) \ + _mm256_cmp_pd (_mm256_setzero_pd (),\ + _mm256_setzero_pd (),\ + _CMP_EQ_OQ), \ + (int) (SCALE)) + +#define _mm256_mask_i64gather_pd(SRC, BASE, INDEX, MASK, SCALE) \ + (__m256d) __builtin_ia32_gatherdiv4df ((__v4df)(__m256d) (SRC), \ + (double const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4df)(__m256d) (MASK), \ + (int) (SCALE)) + +#define _mm_i32gather_ps(BASE, INDEX, SCALE) \ + (__m128) __builtin_ia32_gathersiv4sf ((__v4sf) _mm_setzero_ps (), \ + (float const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4sf) \ + _mm_cmpeq_ps (_mm_setzero_ps (),\ + _mm_setzero_ps ()),\ + (int) (SCALE)) + +#define _mm_mask_i32gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128) __builtin_ia32_gathersiv4sf ((__v4sf)(__m128) (SRC), \ + (float const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4sf)(__m128) (MASK), \ + (int) (SCALE)) + +#define _mm256_i32gather_ps(BASE, INDEX, SCALE) \ + (__m256) __builtin_ia32_gathersiv8sf ((__v8sf) _mm256_setzero_ps (), \ + (float const *) (BASE), \ + (__v8si)(__m256i) (INDEX), \ + (__v8sf) \ + _mm256_cmp_ps (_mm256_setzero_ps (),\ + _mm256_setzero_ps (),\ + _CMP_EQ_OQ), \ + (int) (SCALE)) + +#define _mm256_mask_i32gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ + (__m256) __builtin_ia32_gathersiv8sf ((__v8sf)(__m256) (SRC), \ + (float const *) (BASE), \ + (__v8si)(__m256i) (INDEX), \ + (__v8sf)(__m256) (MASK), \ + (int) (SCALE)) + +#define _mm_i64gather_ps(BASE, INDEX, SCALE) \ + (__m128) __builtin_ia32_gatherdiv4sf ((__v4sf) _mm_setzero_pd (), \ + (float const *) (BASE), \ + 
(__v2di)(__m128i) (INDEX), \ + (__v4sf) \ + _mm_cmpeq_ps (_mm_setzero_ps (),\ + _mm_setzero_ps ()),\ + (int) (SCALE)) + +#define _mm_mask_i64gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128) __builtin_ia32_gatherdiv4sf ((__v4sf)(__m128) (SRC), \ + (float const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + (__v4sf)(__m128) (MASK), \ + (int) (SCALE)) + +#define _mm256_i64gather_ps(BASE, INDEX, SCALE) \ + (__m128) __builtin_ia32_gatherdiv4sf256 ((__v4sf) _mm_setzero_ps (), \ + (float const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4sf) \ + _mm_cmpeq_ps (_mm_setzero_ps (),\ + _mm_setzero_ps ()),\ + (int) (SCALE)) + +#define _mm256_mask_i64gather_ps(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128) __builtin_ia32_gatherdiv4sf256 ((__v4sf)(__m128) (SRC), \ + (float const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4sf)(__m128) (MASK), \ + (int) (SCALE)) + +#define _mm_i32gather_epi64(BASE, INDEX, SCALE) \ + (__m128i) __builtin_ia32_gathersiv2di ((__v2di) _mm_setzero_si128 (), \ + (long long const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v2di)_mm_set1_epi64x (-1), \ + (int) (SCALE)) + +#define _mm_mask_i32gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128i) __builtin_ia32_gathersiv2di ((__v2di)(__m128i) (SRC), \ + (long long const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v2di)(__m128i) (MASK), \ + (int) (SCALE)) + +#define _mm256_i32gather_epi64(BASE, INDEX, SCALE) \ + (__m256i) __builtin_ia32_gathersiv4di ((__v4di) _mm256_setzero_si256 (), \ + (long long const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4di)_mm256_set1_epi64x (-1), \ + (int) (SCALE)) + +#define _mm256_mask_i32gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ + (__m256i) __builtin_ia32_gathersiv4di ((__v4di)(__m256i) (SRC), \ + (long long const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4di)(__m256i) (MASK), \ + (int) (SCALE)) + +#define _mm_i64gather_epi64(BASE, INDEX, SCALE) \ + (__m128i) __builtin_ia32_gatherdiv2di ((__v2di) _mm_setzero_si128 (), \ + (long long 
const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + (__v2di)_mm_set1_epi64x (-1), \ + (int) (SCALE)) + +#define _mm_mask_i64gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128i) __builtin_ia32_gatherdiv2di ((__v2di)(__m128i) (SRC), \ + (long long const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + (__v2di)(__m128i) (MASK), \ + (int) (SCALE)) + +#define _mm256_i64gather_epi64(BASE, INDEX, SCALE) \ + (__m256i) __builtin_ia32_gatherdiv4di ((__v4di) _mm256_setzero_si256 (), \ + (long long const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4di)_mm256_set1_epi64x (-1), \ + (int) (SCALE)) + +#define _mm256_mask_i64gather_epi64(SRC, BASE, INDEX, MASK, SCALE) \ + (__m256i) __builtin_ia32_gatherdiv4di ((__v4di)(__m256i) (SRC), \ + (long long const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4di)(__m256i) (MASK), \ + (int) (SCALE)) + +#define _mm_i32gather_epi32(BASE, INDEX, SCALE) \ + (__m128i) __builtin_ia32_gathersiv4si ((__v4si) _mm_setzero_si128 (), \ + (int const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4si)_mm_set1_epi32 (-1), \ + (int) (SCALE)) + +#define _mm_mask_i32gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128i) __builtin_ia32_gathersiv4si ((__v4si)(__m128i) (SRC), \ + (int const *) (BASE), \ + (__v4si)(__m128i) (INDEX), \ + (__v4si)(__m128i) (MASK), \ + (int) (SCALE)) + +#define _mm256_i32gather_epi32(BASE, INDEX, SCALE) \ + (__m256i) __builtin_ia32_gathersiv8si ((__v8si) _mm256_setzero_si256 (), \ + (int const *) (BASE), \ + (__v8si)(__m256i) (INDEX), \ + (__v8si)_mm256_set1_epi32 (-1), \ + (int) (SCALE)) + +#define _mm256_mask_i32gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ + (__m256i) __builtin_ia32_gathersiv8si ((__v8si)(__m256i) (SRC), \ + (int const *) (BASE), \ + (__v8si)(__m256i) (INDEX), \ + (__v8si)(__m256i) (MASK), \ + (int) (SCALE)) + +#define _mm_i64gather_epi32(BASE, INDEX, SCALE) \ + (__m128i) __builtin_ia32_gatherdiv4si ((__v4si) _mm_setzero_si128 (), \ + (int const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + 
(__v4si)_mm_set1_epi32 (-1), \ + (int) (SCALE)) + +#define _mm_mask_i64gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128i) __builtin_ia32_gatherdiv4si ((__v4si)(__m128i) (SRC), \ + (int const *) (BASE), \ + (__v2di)(__m128i) (INDEX), \ + (__v4si)(__m128i) (MASK), \ + (int) (SCALE)) + +#define _mm256_i64gather_epi32(BASE, INDEX, SCALE) \ + (__m128i) __builtin_ia32_gatherdiv4si256 ((__v4si) _mm_setzero_si128 (), \ + (int const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4si)_mm_set1_epi32(-1), \ + (int) (SCALE)) + +#define _mm256_mask_i64gather_epi32(SRC, BASE, INDEX, MASK, SCALE) \ + (__m128i) __builtin_ia32_gatherdiv4si256 ((__v4si)(__m128i) (SRC), \ + (int const *) (BASE), \ + (__v4di)(__m256i) (INDEX), \ + (__v4si)(__m128i) (MASK), \ + (int) (SCALE)) +#endif /* __OPTIMIZE__ */ + +#ifdef __DISABLE_AVX2__ +#undef __DISABLE_AVX2__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX2__ */ + +#endif /* _AVX2INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx5124fmapsintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx5124fmapsintrin.h new file mode 100644 index 0000000..872146e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx5124fmapsintrin.h @@ -0,0 +1,216 @@ +/* Copyright (C) 2015-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _AVX5124FMAPSINTRIN_H_INCLUDED +#define _AVX5124FMAPSINTRIN_H_INCLUDED + +#ifndef __AVX5124FMAPS__ +#pragma GCC push_options +#pragma GCC target("avx5124fmaps") +#define __DISABLE_AVX5124FMAPS__ +#endif /* __AVX5124FMAPS__ */ + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_4fmadd_ps (__m512 __A, __m512 __B, __m512 __C, + __m512 __D, __m512 __E, __m128 *__F) +{ + return (__m512) __builtin_ia32_4fmaddps ((__v16sf) __B, + (__v16sf) __C, + (__v16sf) __D, + (__v16sf) __E, + (__v16sf) __A, + (const __v4sf *) __F); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_4fmadd_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, __m512 __D, __m512 __E, __m128 *__F) +{ + return (__m512) __builtin_ia32_4fmaddps_mask ((__v16sf) __B, + (__v16sf) __C, + (__v16sf) __D, + (__v16sf) __E, + (__v16sf) __A, + (const __v4sf *) __F, + (__v16sf) __A, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_4fmadd_ps (__mmask16 __U, + __m512 __A, __m512 __B, __m512 __C, + __m512 __D, __m512 __E, __m128 *__F) +{ + return (__m512) __builtin_ia32_4fmaddps_mask ((__v16sf) __B, + (__v16sf) __C, + (__v16sf) __D, + (__v16sf) __E, + (__v16sf) __A, + (const __v4sf *) __F, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_4fmadd_ss (__m128 __A, __m128 __B, __m128 __C, + __m128 __D, __m128 __E, __m128 *__F) +{ + return (__m128) __builtin_ia32_4fmaddss ((__v4sf) __B, + (__v4sf) __C, + 
(__v4sf) __D, + (__v4sf) __E, + (__v4sf) __A, + (const __v4sf *) __F); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_4fmadd_ss (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C, + __m128 __D, __m128 __E, __m128 *__F) +{ + return (__m128) __builtin_ia32_4fmaddss_mask ((__v4sf) __B, + (__v4sf) __C, + (__v4sf) __D, + (__v4sf) __E, + (__v4sf) __A, + (const __v4sf *) __F, + (__v4sf) __A, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_4fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C, + __m128 __D, __m128 __E, __m128 *__F) +{ + return (__m128) __builtin_ia32_4fmaddss_mask ((__v4sf) __B, + (__v4sf) __C, + (__v4sf) __D, + (__v4sf) __E, + (__v4sf) __A, + (const __v4sf *) __F, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_4fnmadd_ps (__m512 __A, __m512 __B, __m512 __C, + __m512 __D, __m512 __E, __m128 *__F) +{ + return (__m512) __builtin_ia32_4fnmaddps ((__v16sf) __B, + (__v16sf) __C, + (__v16sf) __D, + (__v16sf) __E, + (__v16sf) __A, + (const __v4sf *) __F); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_4fnmadd_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, __m512 __D, __m512 __E, __m128 *__F) +{ + return (__m512) __builtin_ia32_4fnmaddps_mask ((__v16sf) __B, + (__v16sf) __C, + (__v16sf) __D, + (__v16sf) __E, + (__v16sf) __A, + (const __v4sf *) __F, + (__v16sf) __A, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_4fnmadd_ps (__mmask16 __U, + __m512 __A, __m512 __B, __m512 __C, + __m512 __D, __m512 __E, __m128 *__F) +{ + return (__m512) __builtin_ia32_4fnmaddps_mask ((__v16sf) __B, + (__v16sf) __C, + (__v16sf) __D, + (__v16sf) __E, + (__v16sf) __A, + (const __v4sf *) 
__F, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_4fnmadd_ss (__m128 __A, __m128 __B, __m128 __C, + __m128 __D, __m128 __E, __m128 *__F) +{ + return (__m128) __builtin_ia32_4fnmaddss ((__v4sf) __B, + (__v4sf) __C, + (__v4sf) __D, + (__v4sf) __E, + (__v4sf) __A, + (const __v4sf *) __F); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_4fnmadd_ss (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C, + __m128 __D, __m128 __E, __m128 *__F) +{ + return (__m128) __builtin_ia32_4fnmaddss_mask ((__v4sf) __B, + (__v4sf) __C, + (__v4sf) __D, + (__v4sf) __E, + (__v4sf) __A, + (const __v4sf *) __F, + (__v4sf) __A, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_4fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C, + __m128 __D, __m128 __E, __m128 *__F) +{ + return (__m128) __builtin_ia32_4fnmaddss_mask ((__v4sf) __B, + (__v4sf) __C, + (__v4sf) __D, + (__v4sf) __E, + (__v4sf) __A, + (const __v4sf *) __F, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +#ifdef __DISABLE_AVX5124FMAPS__ +#undef __DISABLE_AVX5124FMAPS__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX5124FMAPS__ */ + +#endif /* _AVX5124FMAPSINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx5124vnniwintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx5124vnniwintrin.h new file mode 100644 index 0000000..fdeded4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx5124vnniwintrin.h @@ -0,0 +1,132 @@ +/* Copyright (C) 2015-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _AVX5124VNNIWINTRIN_H_INCLUDED +#define _AVX5124VNNIWINTRIN_H_INCLUDED + +#ifndef __AVX5124VNNIW__ +#pragma GCC push_options +#pragma GCC target("avx5124vnniw") +#define __DISABLE_AVX5124VNNIW__ +#endif /* __AVX5124VNNIW__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_4dpwssd_epi32 (__m512i __A, __m512i __B, __m512i __C, + __m512i __D, __m512i __E, __m128i *__F) +{ + return (__m512i) __builtin_ia32_vp4dpwssd ((__v16si) __B, + (__v16si) __C, + (__v16si) __D, + (__v16si) __E, + (__v16si) __A, + (const __v4si *) __F); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_4dpwssd_epi32 (__m512i __A, __mmask16 __U, __m512i __B, + __m512i __C, __m512i __D, __m512i __E, + __m128i *__F) +{ + return (__m512i) __builtin_ia32_vp4dpwssd_mask ((__v16si) __B, + (__v16si) __C, + (__v16si) __D, + (__v16si) __E, + (__v16si) __A, + (const __v4si *) __F, + (__v16si) __A, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_4dpwssd_epi32 (__mmask16 __U, __m512i __A, __m512i __B, + __m512i __C, __m512i __D, __m512i __E, + __m128i *__F) +{ + return (__m512i) 
__builtin_ia32_vp4dpwssd_mask ((__v16si) __B, + (__v16si) __C, + (__v16si) __D, + (__v16si) __E, + (__v16si) __A, + (const __v4si *) __F, + (__v16si) _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_4dpwssds_epi32 (__m512i __A, __m512i __B, __m512i __C, + __m512i __D, __m512i __E, __m128i *__F) +{ + return (__m512i) __builtin_ia32_vp4dpwssds ((__v16si) __B, + (__v16si) __C, + (__v16si) __D, + (__v16si) __E, + (__v16si) __A, + (const __v4si *) __F); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_4dpwssds_epi32 (__m512i __A, __mmask16 __U, __m512i __B, + __m512i __C, __m512i __D, __m512i __E, + __m128i *__F) +{ + return (__m512i) __builtin_ia32_vp4dpwssds_mask ((__v16si) __B, + (__v16si) __C, + (__v16si) __D, + (__v16si) __E, + (__v16si) __A, + (const __v4si *) __F, + (__v16si) __A, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_4dpwssds_epi32 (__mmask16 __U, __m512i __A, __m512i __B, + __m512i __C, __m512i __D, __m512i __E, + __m128i *__F) +{ + return (__m512i) __builtin_ia32_vp4dpwssds_mask ((__v16si) __B, + (__v16si) __C, + (__v16si) __D, + (__v16si) __E, + (__v16si) __A, + (const __v4si *) __F, + (__v16si) _mm512_setzero_ps (), + (__mmask16) __U); +} + +#ifdef __DISABLE_AVX5124VNNIW__ +#undef __DISABLE_AVX5124VNNIW__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX5124VNNIW__ */ + +#endif /* _AVX5124VNNIWINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bf16intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bf16intrin.h new file mode 100644 index 0000000..b6e9dda --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bf16intrin.h @@ -0,0 +1,154 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512BF16INTRIN_H_INCLUDED +#define _AVX512BF16INTRIN_H_INCLUDED + +#ifndef __AVX512BF16__ +#pragma GCC push_options +#pragma GCC target("avx512bf16") +#define __DISABLE_AVX512BF16__ +#endif /* __AVX512BF16__ */ + +/* Internal data types for implementing the intrinsics. */ +typedef short __v32bh __attribute__ ((__vector_size__ (64))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef short __m512bh __attribute__ ((__vector_size__ (64), __may_alias__)); + +/* Convert One BF16 Data to One Single Float Data. 
*/ +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsbh_ss (__bfloat16 __A) +{ + union{ float a; unsigned int b;} __tmp; + __tmp.b = ((unsigned int)(__A)) << 16; + return __tmp.a; +} + +/* vcvtne2ps2bf16 */ + +extern __inline __m512bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtne2ps_pbh (__m512 __A, __m512 __B) +{ + return (__m512bh)__builtin_ia32_cvtne2ps2bf16_v32hi(__A, __B); +} + +extern __inline __m512bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtne2ps_pbh (__m512bh __A, __mmask32 __B, __m512 __C, __m512 __D) +{ + return (__m512bh)__builtin_ia32_cvtne2ps2bf16_v32hi_mask(__C, __D, __A, __B); +} + +extern __inline __m512bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtne2ps_pbh (__mmask32 __A, __m512 __B, __m512 __C) +{ + return (__m512bh)__builtin_ia32_cvtne2ps2bf16_v32hi_maskz(__B, __C, __A); +} + +/* vcvtneps2bf16 */ + +extern __inline __m256bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtneps_pbh (__m512 __A) +{ + return (__m256bh)__builtin_ia32_cvtneps2bf16_v16sf(__A); +} + +extern __inline __m256bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtneps_pbh (__m256bh __A, __mmask16 __B, __m512 __C) +{ + return (__m256bh)__builtin_ia32_cvtneps2bf16_v16sf_mask(__C, __A, __B); +} + +extern __inline __m256bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtneps_pbh (__mmask16 __A, __m512 __B) +{ + return (__m256bh)__builtin_ia32_cvtneps2bf16_v16sf_maskz(__B, __A); +} + +/* vdpbf16ps */ + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_dpbf16_ps (__m512 __A, __m512bh __B, __m512bh __C) +{ + return (__m512)__builtin_ia32_dpbf16ps_v16sf(__A, __B, __C); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_mask_dpbf16_ps (__m512 __A, __mmask16 __B, __m512bh __C, __m512bh __D) +{ + return (__m512)__builtin_ia32_dpbf16ps_v16sf_mask(__A, __C, __D, __B); +} + +extern __inline __m512 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_dpbf16_ps (__mmask16 __A, __m512 __B, __m512bh __C, __m512bh __D) +{ + return (__m512)__builtin_ia32_dpbf16ps_v16sf_maskz(__B, __C, __D, __A); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpbh_ps (__m256bh __A) +{ + return (__m512)_mm512_castsi512_ps ((__m512i)_mm512_slli_epi32 ( + (__m512i)_mm512_cvtepi16_epi32 ((__m256i)__A), 16)); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpbh_ps (__mmask16 __U, __m256bh __A) +{ + return (__m512)_mm512_castsi512_ps ((__m512i) _mm512_slli_epi32 ( + (__m512i)_mm512_maskz_cvtepi16_epi32 ( + (__mmask16)__U, (__m256i)__A), 16)); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpbh_ps (__m512 __S, __mmask16 __U, __m256bh __A) +{ + return (__m512)_mm512_castsi512_ps ((__m512i)(_mm512_mask_slli_epi32 ( + (__m512i)__S, (__mmask16)__U, + (__m512i)_mm512_cvtepi16_epi32 ((__m256i)__A), 16))); +} + +#ifdef __DISABLE_AVX512BF16__ +#undef __DISABLE_AVX512BF16__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BF16__ */ + +#endif /* _AVX512BF16INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bf16vlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bf16vlintrin.h new file mode 100644 index 0000000..969335f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bf16vlintrin.h @@ -0,0 +1,246 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512BF16VLINTRIN_H_INCLUDED +#define _AVX512BF16VLINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512BF16__) +#pragma GCC push_options +#pragma GCC target("avx512bf16,avx512vl") +#define __DISABLE_AVX512BF16VL__ +#endif /* __AVX512BF16__ */ + +/* Internal data types for implementing the intrinsics. */ +typedef short __v16bh __attribute__ ((__vector_size__ (32))); +typedef short __v8bh __attribute__ ((__vector_size__ (16))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. 
*/ +typedef short __m256bh __attribute__ ((__vector_size__ (32), __may_alias__)); +typedef short __m128bh __attribute__ ((__vector_size__ (16), __may_alias__)); + +typedef unsigned short __bfloat16; +/* vcvtne2ps2bf16 */ + +extern __inline __m256bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtne2ps_pbh (__m256 __A, __m256 __B) +{ + return (__m256bh)__builtin_ia32_cvtne2ps2bf16_v16hi(__A, __B); +} + +extern __inline __m256bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtne2ps_pbh (__m256bh __A, __mmask16 __B, __m256 __C, __m256 __D) +{ + return (__m256bh)__builtin_ia32_cvtne2ps2bf16_v16hi_mask(__C, __D, __A, __B); +} + +extern __inline __m256bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtne2ps_pbh (__mmask16 __A, __m256 __B, __m256 __C) +{ + return (__m256bh)__builtin_ia32_cvtne2ps2bf16_v16hi_maskz(__B, __C, __A); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtne2ps_pbh (__m128 __A, __m128 __B) +{ + return (__m128bh)__builtin_ia32_cvtne2ps2bf16_v8hi(__A, __B); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtne2ps_pbh (__m128bh __A, __mmask8 __B, __m128 __C, __m128 __D) +{ + return (__m128bh)__builtin_ia32_cvtne2ps2bf16_v8hi_mask(__C, __D, __A, __B); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtne2ps_pbh (__mmask8 __A, __m128 __B, __m128 __C) +{ + return (__m128bh)__builtin_ia32_cvtne2ps2bf16_v8hi_maskz(__B, __C, __A); +} + +/* vcvtneps2bf16 */ + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtneps_pbh (__m256 __A) +{ + return (__m128bh)__builtin_ia32_cvtneps2bf16_v8sf(__A); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtneps_pbh (__m128bh __A, 
__mmask8 __B, __m256 __C) +{ + return (__m128bh)__builtin_ia32_cvtneps2bf16_v8sf_mask(__C, __A, __B); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtneps_pbh (__mmask8 __A, __m256 __B) +{ + return (__m128bh)__builtin_ia32_cvtneps2bf16_v8sf_maskz(__B, __A); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtneps_pbh (__m128 __A) +{ + return (__m128bh)__builtin_ia32_cvtneps2bf16_v4sf(__A); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtneps_pbh (__m128bh __A, __mmask8 __B, __m128 __C) +{ + return (__m128bh)__builtin_ia32_cvtneps2bf16_v4sf_mask(__C, __A, __B); +} + +extern __inline __m128bh +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtneps_pbh (__mmask8 __A, __m128 __B) +{ + return (__m128bh)__builtin_ia32_cvtneps2bf16_v4sf_maskz(__B, __A); +} + +/* vdpbf16ps */ + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dpbf16_ps (__m256 __A, __m256bh __B, __m256bh __C) +{ + return (__m256)__builtin_ia32_dpbf16ps_v8sf(__A, __B, __C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_dpbf16_ps (__m256 __A, __mmask8 __B, __m256bh __C, __m256bh __D) +{ + return (__m256)__builtin_ia32_dpbf16ps_v8sf_mask(__A, __C, __D, __B); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_dpbf16_ps (__mmask8 __A, __m256 __B, __m256bh __C, __m256bh __D) +{ + return (__m256)__builtin_ia32_dpbf16ps_v8sf_maskz(__B, __C, __D, __A); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dpbf16_ps (__m128 __A, __m128bh __B, __m128bh __C) +{ + return (__m128)__builtin_ia32_dpbf16ps_v4sf(__A, __B, __C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_dpbf16_ps (__m128 __A, __mmask8 __B, __m128bh __C, __m128bh __D) +{ + return (__m128)__builtin_ia32_dpbf16ps_v4sf_mask(__A, __C, __D, __B); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_dpbf16_ps (__mmask8 __A, __m128 __B, __m128bh __C, __m128bh __D) +{ + return (__m128)__builtin_ia32_dpbf16ps_v4sf_maskz(__B, __C, __D, __A); +} + +extern __inline __bfloat16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtness_sbh (float __A) +{ + __v4sf __V = {__A, 0, 0, 0}; + __v8hi __R = __builtin_ia32_cvtneps2bf16_v4sf_mask ((__v4sf)__V, + (__v8hi)_mm_undefined_si128 (), (__mmask8)-1); + return __R[0]; +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpbh_ps (__m128bh __A) +{ + return (__m128)_mm_castsi128_ps ((__m128i)_mm_slli_epi32 ( + (__m128i)_mm_cvtepi16_epi32 ((__m128i)__A), 16)); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpbh_ps (__m128bh __A) +{ + return (__m256)_mm256_castsi256_ps ((__m256i)_mm256_slli_epi32 ( + (__m256i)_mm256_cvtepi16_epi32 ((__m128i)__A), 16)); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpbh_ps (__mmask8 __U, __m128bh __A) +{ + return (__m128)_mm_castsi128_ps ((__m128i)_mm_slli_epi32 ( + (__m128i)_mm_maskz_cvtepi16_epi32 ( + (__mmask8)__U, (__m128i)__A), 16)); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtpbh_ps (__mmask8 __U, __m128bh __A) +{ + return (__m256)_mm256_castsi256_ps ((__m256i)_mm256_slli_epi32 ( + (__m256i)_mm256_maskz_cvtepi16_epi32 ( + (__mmask8)__U, (__m128i)__A), 16)); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpbh_ps (__m128 __S, __mmask8 __U, __m128bh __A) +{ + return 
(__m128)_mm_castsi128_ps ((__m128i)_mm_mask_slli_epi32 ( + (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32 ( + (__m128i)__A), 16)); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpbh_ps (__m256 __S, __mmask8 __U, __m128bh __A) +{ + return (__m256)_mm256_castsi256_ps ((__m256i)_mm256_mask_slli_epi32 ( + (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32 ( + (__m128i)__A), 16)); +} + +#ifdef __DISABLE_AVX512BF16VL__ +#undef __DISABLE_AVX512BF16VL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BF16VL__ */ + +#endif /* _AVX512BF16VLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bitalgintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bitalgintrin.h new file mode 100644 index 0000000..7eb9520 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bitalgintrin.h @@ -0,0 +1,283 @@ +/* Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512BITALGINTRIN_H_INCLUDED +#define _AVX512BITALGINTRIN_H_INCLUDED + +#ifndef __AVX512BITALG__ +#pragma GCC push_options +#pragma GCC target("avx512bitalg") +#define __DISABLE_AVX512BITALG__ +#endif /* __AVX512BITALG__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_popcnt_epi8 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountb_v64qi ((__v64qi) __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_popcnt_epi16 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountw_v32hi ((__v32hi) __A); +} + +#ifdef __DISABLE_AVX512BITALG__ +#undef __DISABLE_AVX512BITALG__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BITALG__ */ + +#if !defined(__AVX512BITALG__) || !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("avx512bitalg,avx512bw") +#define __DISABLE_AVX512BITALGBW__ +#endif /* __AVX512VLBW__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_popcnt_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountb_v64qi_mask ((__v64qi) __A, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_popcnt_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountb_v64qi_mask ((__v64qi) __A, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_popcnt_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountw_v32hi_mask ((__v32hi) __A, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_popcnt_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i) 
__builtin_ia32_vpopcountw_v32hi_mask ((__v32hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __mmask64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_bitshuffle_epi64_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_vpshufbitqmb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_bitshuffle_epi64_mask (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_vpshufbitqmb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) __M); +} + +#ifdef __DISABLE_AVX512BITALGBW__ +#undef __DISABLE_AVX512BITALGBW__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BITALGBW__ */ + +#if !defined(__AVX512BITALG__) || !defined(__AVX512VL__) || !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("avx512bitalg,avx512vl,avx512bw") +#define __DISABLE_AVX512BITALGVLBW__ +#endif /* __AVX512VLBW__ */ + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_popcnt_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountb_v32qi_mask ((__v32qi) __A, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_popcnt_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountb_v32qi_mask ((__v32qi) __A, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __mmask32 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_bitshuffle_epi64_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_bitshuffle_epi64_mask (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) __M); +} + +#ifdef __DISABLE_AVX512BITALGVLBW__ +#undef __DISABLE_AVX512BITALGVLBW__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BITALGVLBW__ */ + + +#if !defined(__AVX512BITALG__) || !defined(__AVX512VL__) +#pragma GCC push_options +#pragma GCC target("avx512bitalg,avx512vl") +#define __DISABLE_AVX512BITALGVL__ +#endif /* __AVX512VLBW__ */ + +extern __inline __mmask16 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_bitshuffle_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_bitshuffle_epi64_mask (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_popcnt_epi8 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountb_v32qi ((__v32qi) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_popcnt_epi16 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountw_v16hi ((__v16hi) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountb_v16qi ((__v16qi) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountw_v8hi ((__v8hi) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_popcnt_epi16 (__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountw_v16hi_mask ((__v16hi) __A, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_popcnt_epi16 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountw_v16hi_mask ((__v16hi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_popcnt_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountb_v16qi_mask ((__v16qi) __A, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_popcnt_epi8 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountb_v16qi_mask ((__v16qi) __A, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_popcnt_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountw_v8hi_mask ((__v8hi) __A, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_popcnt_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountw_v8hi_mask ((__v8hi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} +#ifdef __DISABLE_AVX512BITALGVL__ +#undef __DISABLE_AVX512BITALGVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BITALGBW__ */ + +#endif /* _AVX512BITALGINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bwintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bwintrin.h new file mode 100644 index 0000000..7566cc7 --- /dev/null +++ 
b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512bwintrin.h @@ -0,0 +1,3333 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512BWINTRIN_H_INCLUDED +#define _AVX512BWINTRIN_H_INCLUDED + +#ifndef __AVX512BW__ +#pragma GCC push_options +#pragma GCC target("avx512bw") +#define __DISABLE_AVX512BW__ +#endif /* __AVX512BW__ */ + +/* Internal data types for implementing the intrinsics. 
*/ +typedef short __v32hi __attribute__ ((__vector_size__ (64))); +typedef short __v32hi_u __attribute__ ((__vector_size__ (64), \ + __may_alias__, __aligned__ (1))); +typedef char __v64qi __attribute__ ((__vector_size__ (64))); +typedef char __v64qi_u __attribute__ ((__vector_size__ (64), \ + __may_alias__, __aligned__ (1))); + +typedef unsigned long long __mmask64; + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktest_mask32_u8 (__mmask32 __A, __mmask32 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_ktestcsi (__A, __B); + return (unsigned char) __builtin_ia32_ktestzsi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktest_mask64_u8 (__mmask64 __A, __mmask64 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_ktestcdi (__A, __B); + return (unsigned char) __builtin_ia32_ktestzdi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestz_mask32_u8 (__mmask32 __A, __mmask32 __B) +{ + return (unsigned char) __builtin_ia32_ktestzsi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestz_mask64_u8 (__mmask64 __A, __mmask64 __B) +{ + return (unsigned char) __builtin_ia32_ktestzdi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestc_mask32_u8 (__mmask32 __A, __mmask32 __B) +{ + return (unsigned char) __builtin_ia32_ktestcsi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestc_mask64_u8 (__mmask64 __A, __mmask64 __B) +{ + return (unsigned char) __builtin_ia32_ktestcdi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortest_mask32_u8 (__mmask32 __A, __mmask32 
__B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_kortestcsi (__A, __B); + return (unsigned char) __builtin_ia32_kortestzsi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortest_mask64_u8 (__mmask64 __A, __mmask64 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_kortestcdi (__A, __B); + return (unsigned char) __builtin_ia32_kortestzdi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestz_mask32_u8 (__mmask32 __A, __mmask32 __B) +{ + return (unsigned char) __builtin_ia32_kortestzsi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestz_mask64_u8 (__mmask64 __A, __mmask64 __B) +{ + return (unsigned char) __builtin_ia32_kortestzdi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestc_mask32_u8 (__mmask32 __A, __mmask32 __B) +{ + return (unsigned char) __builtin_ia32_kortestcsi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestc_mask64_u8 (__mmask64 __A, __mmask64 __B) +{ + return (unsigned char) __builtin_ia32_kortestcdi (__A, __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kadd_mask32 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kaddsi ((__mmask32) __A, (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kadd_mask64 (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kadddi ((__mmask64) __A, (__mmask64) __B); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtmask32_u32 (__mmask32 __A) +{ + return (unsigned int) __builtin_ia32_kmovd 
((__mmask32) __A); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtmask64_u64 (__mmask64 __A) +{ + return (unsigned long long) __builtin_ia32_kmovq ((__mmask64) __A); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtu32_mask32 (unsigned int __A) +{ + return (__mmask32) __builtin_ia32_kmovd ((__mmask32) __A); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtu64_mask64 (unsigned long long __A) +{ + return (__mmask64) __builtin_ia32_kmovq ((__mmask64) __A); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_load_mask32 (__mmask32 *__A) +{ + return (__mmask32) __builtin_ia32_kmovd (*__A); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_load_mask64 (__mmask64 *__A) +{ + return (__mmask64) __builtin_ia32_kmovq (*(__mmask64 *) __A); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_store_mask32 (__mmask32 *__A, __mmask32 __B) +{ + *(__mmask32 *) __A = __builtin_ia32_kmovd (__B); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_store_mask64 (__mmask64 *__A, __mmask64 __B) +{ + *(__mmask64 *) __A = __builtin_ia32_kmovq (__B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_knot_mask32 (__mmask32 __A) +{ + return (__mmask32) __builtin_ia32_knotsi ((__mmask32) __A); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_knot_mask64 (__mmask64 __A) +{ + return (__mmask64) __builtin_ia32_knotdi ((__mmask64) __A); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kor_mask32 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_korsi 
((__mmask32) __A, (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kor_mask64 (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kordi ((__mmask64) __A, (__mmask64) __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kxnor_mask32 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kxnorsi ((__mmask32) __A, (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kxnor_mask64 (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kxnordi ((__mmask64) __A, (__mmask64) __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kxor_mask32 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kxorsi ((__mmask32) __A, (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kxor_mask64 (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kxordi ((__mmask64) __A, (__mmask64) __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kand_mask32 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kandsi ((__mmask32) __A, (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kand_mask64 (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kanddi ((__mmask64) __A, (__mmask64) __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kandn_mask32 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kandnsi ((__mmask32) __A, (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kandn_mask64 (__mmask64 __A, 
__mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kandndi ((__mmask64) __A, (__mmask64) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdquhi512_mask ((__v32hi) __A, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdquhi512_mask ((__v32hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_epi16 (void const *__P) +{ + return (__m512i) (*(__v32hi_u *) __P); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((const short *) __P, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((const short *) __P, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_epi16 (void *__P, __m512i __A) +{ + *(__v32hi_u *) __P = (__v32hi_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A) +{ + __builtin_ia32_storedquhi512_mask ((short *) __P, + (__v32hi) __A, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, 
__m512i __A) +{ + return (__m512i) __builtin_ia32_movdquqi512_mask ((__v64qi) __A, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdquqi512_mask ((__v64qi) __A, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kunpackw (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A, + (__mmask32) __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kunpackw_mask32 (__mmask16 __A, __mmask16 __B) +{ + return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A, + (__mmask32) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kunpackd (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A, + (__mmask64) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kunpackd_mask64 (__mmask32 __A, __mmask32 __B) +{ + return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A, + (__mmask64) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_epi8 (void const *__P) +{ + return (__m512i) (*(__v64qi_u *) __P); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((const char *) __P, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P) +{ + return (__m512i) 
__builtin_ia32_loaddquqi512_mask ((const char *) __P, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_epi8 (void *__P, __m512i __A) +{ + *(__v64qi_u *) __P = (__v64qi_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A) +{ + __builtin_ia32_storedquqi512_mask ((char *) __P, + (__v64qi) __A, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sad_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A, + (__v64qi) __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi16_epi8 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_undefined_si256(), + (__mmask32) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsepi16_epi8 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovswb512_mask 
((__v32hi) __A, + (__v32qi)_mm256_undefined_si256(), + (__mmask32) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi)__O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtusepi16_epi8 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi)_mm256_undefined_si256(), + (__mmask32) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_broadcastb_epi8 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastb512_mask ((__v16qi) __A, + (__v64qi)_mm512_undefined_epi32(), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastb512_mask ((__v16qi) __A, + (__v64qi) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastb512_mask ((__v16qi) __A, + (__v64qi) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_pbroadcastb512_gpr_mask (__A, + (__v64qi) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_set1_epi8 (__mmask64 __M, char __A) +{ + return (__m512i) + __builtin_ia32_pbroadcastb512_gpr_mask (__A, + (__v64qi) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastw_epi16 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastw512_mask ((__v8hi) __A, + (__v32hi)_mm512_undefined_epi32(), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastw512_mask ((__v8hi) __A, + (__v32hi) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastw512_mask 
((__v8hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_pbroadcastw512_gpr_mask (__A, + (__v32hi) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_set1_epi16 (__mmask32 __M, short __A) +{ + return (__m512i) + __builtin_ia32_pbroadcastw512_gpr_mask (__A, + (__v32hi) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mulhrs_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mulhi_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) 
__builtin_ia32_pmulhw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mulhi_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mullo_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v32hu) __A * (__v32hu) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return 
(__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi8_epi16 (__m256i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi8_epi16 (__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi8_epi16 (__mmask32 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu8_epi16 (__m256i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu8_epi16 (__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu8_epi16 (__mmask32 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) 
__B, + (__v32hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B, + (__v32hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B, + (__v32hi) __A, + (__v32hi) __W, + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_epi16 (__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I + /* idx */ , + (__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex2var_epi16 (__m512i __A, __mmask32 __U, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I + /* idx */ , + (__v32hi) __A, + (__v32hi) __B, + (__mmask32) + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask2_permutex2var_epi16 (__m512i __A, __m512i __I, + __mmask32 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A, + (__v32hi) __I + /* idx */ , + (__v32hi) __B, + (__mmask32) + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varhi512_maskz ((__v32hi) __I + /* idx */ , + (__v32hi) __A, + (__v32hi) __B, + 
(__mmask32) + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_avg_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pavgb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v64qu) __A + (__v64qu) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v64qu) __A - (__v64qu) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_avg_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pavgw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_subs_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) 
__U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_subs_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_adds_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) 
__builtin_ia32_paddsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_adds_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v32hu) __A - (__v32hu) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_subs_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) 
__builtin_ia32_psubsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_subs_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v32hu) __A + (__v32hu) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, 
+ __m512i __B) +{ + return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_adds_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_adds_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srl_epi16 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srl_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m128i __B) +{ + return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srl_epi16 (__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_packs_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sll_epi16 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sll_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m128i __B) +{ + return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) __W, + 
(__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sll_epi16 (__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_madd_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i 
__B) +{ + return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpackhi_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpackhi_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpackhi_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhbw512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpackhi_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpackhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpackhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + 
+extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpacklo_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpacklo_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpacklo_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklbw512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpacklo_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpacklo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpacklo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklwd512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epu8_mask (__m512i __A, __m512i __B) +{ + return 
(__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __A, + (__v64qi) __B, 0, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epi8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_pcmpeqb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epu8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __A, + (__v64qi) __B, 0, + __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_pcmpeqb512_mask ((__v64qi) __A, + (__v64qi) __B, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epu16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __A, + (__v32hi) __B, 0, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epi16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_pcmpeqw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epu16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __A, + (__v32hi) __B, 0, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_pcmpeqw512_mask ((__v32hi) __A, + (__v32hi) __B, + __U); +} + +extern __inline __mmask64 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epu8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __A, + (__v64qi) __B, 6, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epi8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_pcmpgtb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epu8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __A, + (__v64qi) __B, 6, + __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_pcmpgtb512_mask ((__v64qi) __A, + (__v64qi) __B, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epu16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __A, + (__v32hi) __B, 6, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epi16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_pcmpgtw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epu16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __A, + (__v32hi) __B, 6, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) 
__builtin_ia32_pcmpgtw512_mask ((__v32hi) __A, + (__v32hi) __B, + __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movepi8_mask (__m512i __A) +{ + return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movepi16_mask (__m512i __A) +{ + return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movm_epi8 (__mmask64 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2b512 (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movm_epi16 (__mmask32 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2w512 (__A); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_test_epi8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestmb512 ((__v64qi) __A, + (__v64qi) __B, __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_test_epi16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestmw512 ((__v32hi) __A, + (__v32hi) __B, __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_testn_epi8_mask (__m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_ptestnmb512 ((__v64qi) __A, + (__v64qi) __B, __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_testn_epi16_mask (__m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A, + (__v32hi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmw512 ((__v32hi) __A, + (__v32hi) __B, __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epu16 (__m512i __A, __m512i __B) +{ + return 
(__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + 
_mm512_setzero_si512 (), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A, 
+ __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sra_epi16 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sra_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m128i __B) +{ + return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sra_epi16 (__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A, + (__v8hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_srav_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srav_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srav_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srlv_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srlv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srlv_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sllv_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + 
_mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sllv_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_packs_epi16 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_packs_epi16 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packsswb512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v64qi) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_packus_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_packus_epi16 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_packus_epi16 (__mmask64 
__M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packuswb512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_epi8 (__m512i __A) +{ + return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_epi16 (__m512i __A) +{ + return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epu8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) 
__builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 4, + (__mmask64) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epu8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 1, + (__mmask64) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epu8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 5, + (__mmask64) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epu8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 2, + (__mmask64) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epu16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 4, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epu16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 1, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epu16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 5, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epu16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) 
__builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 2, + (__mmask32) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epi8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 4, + (__mmask64) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epi8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 1, + (__mmask64) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epi8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 5, + (__mmask64) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epi8_mask (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 2, + (__mmask64) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epi16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 4, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epi16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 1, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epi16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask 
((__v32hi) __X, + (__v32hi) __Y, 5, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epi16_mask (__mmask32 __M, __m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 2, + (__mmask32) __M); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epu8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 4, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epu8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 1, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epu8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 5, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epu8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 2, + (__mmask64) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epu16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 4, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epu16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 1, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_cmpge_epu16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 5, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epu16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 2, + (__mmask32) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epi8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 4, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epi8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 1, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epi8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 5, + (__mmask64) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epi8_mask (__m512i __X, __m512i __Y) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, 2, + (__mmask64) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epi16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 4, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epi16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 1, + (__mmask32) -1); +} 
+ +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epi16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 5, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epi16_mask (__m512i __X, __m512i __Y) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, 2, + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_packs_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, + (__v16si) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_packs_epi32 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, + (__v16si) __B, + (__v32hi) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_packs_epi32 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_packssdw512_mask ((__v16si) __A, + (__v16si) __B, + (__v32hi) __W, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_packus_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, + (__v16si) __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_packus_epi32 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, + (__v16si) __B, + (__v32hi) + _mm512_setzero_si512 (), + __M); +} 
+ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_packus_epi32 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_packusdw512_mask ((__v16si) __A, + (__v16si) __B, + (__v32hi) __W, + __M); +} + +#ifdef __OPTIMIZE__ +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftli_mask32 (__mmask32 __A, unsigned int __B) +{ + return (__mmask32) __builtin_ia32_kshiftlisi ((__mmask32) __A, + (__mmask8) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftli_mask64 (__mmask64 __A, unsigned int __B) +{ + return (__mmask64) __builtin_ia32_kshiftlidi ((__mmask64) __A, + (__mmask8) __B); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftri_mask32 (__mmask32 __A, unsigned int __B) +{ + return (__mmask32) __builtin_ia32_kshiftrisi ((__mmask32) __A, + (__mmask8) __B); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftri_mask64 (__mmask64 __A, unsigned int __B) +{ + return (__mmask64) __builtin_ia32_kshiftridi ((__mmask64) __A, + (__mmask8) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_alignr_epi8 (__m512i __A, __m512i __B, const int __N) +{ + return (__m512i) __builtin_ia32_palignr512 ((__v8di) __A, + (__v8di) __B, __N * 8); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_alignr_epi8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B, const int __N) +{ + return (__m512i) __builtin_ia32_palignr512_mask ((__v8di) __A, + (__v8di) __B, + __N * 8, + (__v8di) __W, + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_alignr_epi8 (__mmask64 __U, __m512i 
__A, __m512i __B, + const int __N) +{ + return (__m512i) __builtin_ia32_palignr512_mask ((__v8di) __A, + (__v8di) __B, + __N * 8, + (__v8di) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_dbsad_epu8 (__m512i __A, __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi) __A, + (__v64qi) __B, + __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_dbsad_epu8 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi) __A, + (__v64qi) __B, + __imm, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_dbsad_epu8 (__mmask32 __U, __m512i __A, __m512i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi) __A, + (__v64qi) __B, + __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srli_epi16 (__m512i __A, const int __imm) +{ + return (__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi) __A, __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srli_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + const int __imm) +{ + return (__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi) __A, __imm, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srli_epi16 (__mmask32 __U, __m512i __A, const int __imm) +{ + return (__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi) __A, __imm, + 
(__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_slli_epi16 (__m512i __A, const int __B) +{ + return (__m512i) __builtin_ia32_psllwi512_mask ((__v32hi) __A, __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_slli_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + const int __B) +{ + return (__m512i) __builtin_ia32_psllwi512_mask ((__v32hi) __A, __B, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_slli_epi16 (__mmask32 __U, __m512i __A, const int __B) +{ + return (__m512i) __builtin_ia32_psllwi512_mask ((__v32hi) __A, __B, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shufflehi_epi16 (__m512i __A, const int __imm) +{ + return (__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi) __A, + __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shufflehi_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + const int __imm) +{ + return (__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi) __A, + __imm, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shufflehi_epi16 (__mmask32 __U, __m512i __A, + const int __imm) +{ + return (__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi) __A, + __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shufflelo_epi16 (__m512i __A, const int __imm) +{ + 
return (__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi) __A, + __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shufflelo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + const int __imm) +{ + return (__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi) __A, + __imm, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shufflelo_epi16 (__mmask32 __U, __m512i __A, + const int __imm) +{ + return (__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi) __A, + __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srai_epi16 (__m512i __A, const int __imm) +{ + return (__m512i) __builtin_ia32_psrawi512_mask ((__v32hi) __A, __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srai_epi16 (__m512i __W, __mmask32 __U, __m512i __A, + const int __imm) +{ + return (__m512i) __builtin_ia32_psrawi512_mask ((__v32hi) __A, __imm, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srai_epi16 (__mmask32 __U, __m512i __A, const int __imm) +{ + return (__m512i) __builtin_ia32_psrawi512_mask ((__v32hi) __A, __imm, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A, + (__v32hi) __W, + (__mmask32) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A, + (__v64qi) __W, + (__mmask64) __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epi16_mask (__mmask32 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, __P, + (__mmask32) __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epi16_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, __P, + (__mmask32) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epi8_mask (__mmask64 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, __P, + (__mmask64) __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epi8_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, __P, + (__mmask64) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epu16_mask (__mmask32 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, __P, + (__mmask32) __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epu16_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi) __X, + (__v32hi) __Y, __P, + (__mmask32) -1); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_mask_cmp_epu8_mask (__mmask64 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, __P, + (__mmask64) __U); +} + +extern __inline __mmask64 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epu8_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi) __X, + (__v64qi) __Y, __P, + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_bslli_epi128 (__m512i __A, const int __N) +{ + return (__m512i) __builtin_ia32_pslldq512 (__A, __N * 8); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_bsrli_epi128 (__m512i __A, const int __N) +{ + return (__m512i) __builtin_ia32_psrldq512 (__A, __N * 8); +} + +#else +#define _kshiftli_mask32(X, Y) \ + ((__mmask32) __builtin_ia32_kshiftlisi ((__mmask32)(X), (__mmask8)(Y))) + +#define _kshiftli_mask64(X, Y) \ + ((__mmask64) __builtin_ia32_kshiftlidi ((__mmask64)(X), (__mmask8)(Y))) + +#define _kshiftri_mask32(X, Y) \ + ((__mmask32) __builtin_ia32_kshiftrisi ((__mmask32)(X), (__mmask8)(Y))) + +#define _kshiftri_mask64(X, Y) \ + ((__mmask64) __builtin_ia32_kshiftridi ((__mmask64)(X), (__mmask8)(Y))) + +#define _mm512_alignr_epi8(X, Y, N) \ + ((__m512i) __builtin_ia32_palignr512 ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), \ + (int)((N) * 8))) + +#define _mm512_mask_alignr_epi8(W, U, X, Y, N) \ + ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)((N) * 8), \ + (__v8di)(__m512i)(W), (__mmask64)(U))) + +#define _mm512_maskz_alignr_epi8(U, X, Y, N) \ + ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)((N) * 8), \ + (__v8di)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask64)(U))) + +#define _mm512_dbsad_epu8(X, Y, C) \ + 
((__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi)(__m512i) (X), \ + (__v64qi)(__m512i) (Y), (int) (C), \ + (__v32hi)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1)) + +#define _mm512_mask_dbsad_epu8(W, U, X, Y, C) \ + ((__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi)(__m512i) (X), \ + (__v64qi)(__m512i) (Y), (int) (C), \ + (__v32hi)(__m512i)(W), \ + (__mmask32)(U))) + +#define _mm512_maskz_dbsad_epu8(U, X, Y, C) \ + ((__m512i) __builtin_ia32_dbpsadbw512_mask ((__v64qi)(__m512i) (X), \ + (__v64qi)(__m512i) (Y), (int) (C), \ + (__v32hi)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask32)(U))) + +#define _mm512_srli_epi16(A, B) \ + ((__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi)(__m512i)(A), \ + (int)(B), (__v32hi)_mm512_setzero_si512 (), (__mmask32)-1)) + +#define _mm512_mask_srli_epi16(W, U, A, B) \ + ((__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi)(__m512i)(A), \ + (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U))) + +#define _mm512_maskz_srli_epi16(U, A, B) \ + ((__m512i) __builtin_ia32_psrlwi512_mask ((__v32hi)(__m512i)(A), \ + (int)(B), (__v32hi)_mm512_setzero_si512 (), (__mmask32)(U))) + +#define _mm512_slli_epi16(X, C) \ + ((__m512i)__builtin_ia32_psllwi512_mask ((__v32hi)(__m512i)(X), (int)(C),\ + (__v32hi)(__m512i)_mm512_setzero_si512 (), \ + (__mmask32)-1)) + +#define _mm512_mask_slli_epi16(W, U, X, C) \ + ((__m512i)__builtin_ia32_psllwi512_mask ((__v32hi)(__m512i)(X), (int)(C),\ + (__v32hi)(__m512i)(W),\ + (__mmask32)(U))) + +#define _mm512_maskz_slli_epi16(U, X, C) \ + ((__m512i)__builtin_ia32_psllwi512_mask ((__v32hi)(__m512i)(X), (int)(C),\ + (__v32hi)(__m512i)_mm512_setzero_si512 (), \ + (__mmask32)(U))) + +#define _mm512_shufflehi_epi16(A, B) \ + ((__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi)(__m512i)(A), (int)(B), \ + (__v32hi)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1)) + +#define _mm512_mask_shufflehi_epi16(W, U, A, B) \ + ((__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi)(__m512i)(A), (int)(B), \ + 
(__v32hi)(__m512i)(W), \ + (__mmask32)(U))) + +#define _mm512_maskz_shufflehi_epi16(U, A, B) \ + ((__m512i) __builtin_ia32_pshufhw512_mask ((__v32hi)(__m512i)(A), (int)(B), \ + (__v32hi)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask32)(U))) + +#define _mm512_shufflelo_epi16(A, B) \ + ((__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi)(__m512i)(A), (int)(B), \ + (__v32hi)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1)) + +#define _mm512_mask_shufflelo_epi16(W, U, A, B) \ + ((__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi)(__m512i)(A), (int)(B), \ + (__v32hi)(__m512i)(W), \ + (__mmask32)(U))) + +#define _mm512_maskz_shufflelo_epi16(U, A, B) \ + ((__m512i) __builtin_ia32_pshuflw512_mask ((__v32hi)(__m512i)(A), (int)(B), \ + (__v32hi)(__m512i) \ + _mm512_setzero_si512 (), \ + (__mmask32)(U))) + +#define _mm512_srai_epi16(A, B) \ + ((__m512i) __builtin_ia32_psrawi512_mask ((__v32hi)(__m512i)(A), \ + (int)(B), (__v32hi)_mm512_setzero_si512 (), (__mmask32)-1)) + +#define _mm512_mask_srai_epi16(W, U, A, B) \ + ((__m512i) __builtin_ia32_psrawi512_mask ((__v32hi)(__m512i)(A), \ + (int)(B), (__v32hi)(__m512i)(W), (__mmask32)(U))) + +#define _mm512_maskz_srai_epi16(U, A, B) \ + ((__m512i) __builtin_ia32_psrawi512_mask ((__v32hi)(__m512i)(A), \ + (int)(B), (__v32hi)_mm512_setzero_si512 (), (__mmask32)(U))) + +#define _mm512_mask_blend_epi16(__U, __A, __W) \ + ((__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) (__A), \ + (__v32hi) (__W), \ + (__mmask32) (__U))) + +#define _mm512_mask_blend_epi8(__U, __A, __W) \ + ((__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) (__A), \ + (__v64qi) (__W), \ + (__mmask64) (__U))) + +#define _mm512_cmp_epi16_mask(X, Y, P) \ + ((__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi)(__m512i)(X), \ + (__v32hi)(__m512i)(Y), (int)(P),\ + (__mmask32)(-1))) + +#define _mm512_cmp_epi8_mask(X, Y, P) \ + ((__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi)(__m512i)(X), \ + (__v64qi)(__m512i)(Y), (int)(P),\ + (__mmask64)(-1))) + 
+#define _mm512_cmp_epu16_mask(X, Y, P) \ + ((__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi)(__m512i)(X), \ + (__v32hi)(__m512i)(Y), (int)(P),\ + (__mmask32)(-1))) + +#define _mm512_cmp_epu8_mask(X, Y, P) \ + ((__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi)(__m512i)(X), \ + (__v64qi)(__m512i)(Y), (int)(P),\ + (__mmask64)(-1))) + +#define _mm512_mask_cmp_epi16_mask(M, X, Y, P) \ + ((__mmask32) __builtin_ia32_cmpw512_mask ((__v32hi)(__m512i)(X), \ + (__v32hi)(__m512i)(Y), (int)(P),\ + (__mmask32)(M))) + +#define _mm512_mask_cmp_epi8_mask(M, X, Y, P) \ + ((__mmask64) __builtin_ia32_cmpb512_mask ((__v64qi)(__m512i)(X), \ + (__v64qi)(__m512i)(Y), (int)(P),\ + (__mmask64)(M))) + +#define _mm512_mask_cmp_epu16_mask(M, X, Y, P) \ + ((__mmask32) __builtin_ia32_ucmpw512_mask ((__v32hi)(__m512i)(X), \ + (__v32hi)(__m512i)(Y), (int)(P),\ + (__mmask32)(M))) + +#define _mm512_mask_cmp_epu8_mask(M, X, Y, P) \ + ((__mmask64) __builtin_ia32_ucmpb512_mask ((__v64qi)(__m512i)(X), \ + (__v64qi)(__m512i)(Y), (int)(P),\ + (__mmask64)(M))) + +#define _mm512_bslli_epi128(A, N) \ + ((__m512i)__builtin_ia32_pslldq512 ((__m512i)(A), (int)(N) * 8)) + +#define _mm512_bsrli_epi128(A, N) \ + ((__m512i)__builtin_ia32_psrldq512 ((__m512i)(A), (int)(N) * 8)) + +#endif + +#ifdef __DISABLE_AVX512BW__ +#undef __DISABLE_AVX512BW__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512BW__ */ + +#endif /* _AVX512BWINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512cdintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512cdintrin.h new file mode 100644 index 0000000..4889b98 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512cdintrin.h @@ -0,0 +1,184 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512CDINTRIN_H_INCLUDED +#define _AVX512CDINTRIN_H_INCLUDED + +#ifndef __AVX512CD__ +#pragma GCC push_options +#pragma GCC target("avx512cd") +#define __DISABLE_AVX512CD__ +#endif /* __AVX512CD__ */ + +/* Internal data types for implementing the intrinsics. */ +typedef long long __v8di __attribute__ ((__vector_size__ (64))); +typedef int __v16si __attribute__ ((__vector_size__ (64))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. 
*/ +typedef long long __m512i __attribute__ ((__vector_size__ (64), __may_alias__)); +typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__)); + +typedef unsigned char __mmask8; +typedef unsigned short __mmask16; + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_conflict_epi32 (__m512i __A) +{ + return (__m512i) + __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) + __builtin_ia32_vpconflictsi_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_conflict_epi64 (__m512i __A) +{ + return (__m512i) + __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) + __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_lzcnt_epi64 (__m512i __A) +{ + return (__m512i) + __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) + __builtin_ia32_vplzcntq_512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_lzcnt_epi32 (__m512i __A) +{ + return (__m512i) + __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) + __builtin_ia32_vplzcntd_512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m512i) __builtin_ia32_broadcastmb512 (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m512i) __builtin_ia32_broadcastmw512 (__A); +} + +#ifdef __DISABLE_AVX512CD__ +#undef 
__DISABLE_AVX512CD__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512CD__ */ + +#endif /* _AVX512CDINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512dqintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512dqintrin.h new file mode 100644 index 0000000..e924250 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512dqintrin.h @@ -0,0 +1,2891 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512DQINTRIN_H_INCLUDED +#define _AVX512DQINTRIN_H_INCLUDED + +#ifndef __AVX512DQ__ +#pragma GCC push_options +#pragma GCC target("avx512dq") +#define __DISABLE_AVX512DQ__ +#endif /* __AVX512DQ__ */ + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktest_mask8_u8 (__mmask8 __A, __mmask8 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_ktestcqi (__A, __B); + return (unsigned char) __builtin_ia32_ktestzqi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestz_mask8_u8 (__mmask8 __A, __mmask8 __B) +{ + return (unsigned char) __builtin_ia32_ktestzqi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestc_mask8_u8 (__mmask8 __A, __mmask8 __B) +{ + return (unsigned char) __builtin_ia32_ktestcqi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktest_mask16_u8 (__mmask16 __A, __mmask16 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_ktestchi (__A, __B); + return (unsigned char) __builtin_ia32_ktestzhi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestz_mask16_u8 (__mmask16 __A, __mmask16 __B) +{ + return (unsigned char) __builtin_ia32_ktestzhi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_ktestc_mask16_u8 (__mmask16 __A, __mmask16 __B) +{ + return (unsigned char) __builtin_ia32_ktestchi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortest_mask8_u8 (__mmask8 __A, __mmask8 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_kortestcqi (__A, __B); + return (unsigned char) __builtin_ia32_kortestzqi (__A, __B); +} + 
+extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestz_mask8_u8 (__mmask8 __A, __mmask8 __B) +{ + return (unsigned char) __builtin_ia32_kortestzqi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestc_mask8_u8 (__mmask8 __A, __mmask8 __B) +{ + return (unsigned char) __builtin_ia32_kortestcqi (__A, __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kadd_mask8 (__mmask8 __A, __mmask8 __B) +{ + return (__mmask8) __builtin_ia32_kaddqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kadd_mask16 (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kaddhi ((__mmask16) __A, (__mmask16) __B); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtmask8_u32 (__mmask8 __A) +{ + return (unsigned int) __builtin_ia32_kmovb ((__mmask8 ) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtu32_mask8 (unsigned int __A) +{ + return (__mmask8) __builtin_ia32_kmovb ((__mmask8) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_load_mask8 (__mmask8 *__A) +{ + return (__mmask8) __builtin_ia32_kmovb (*(__mmask8 *) __A); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_store_mask8 (__mmask8 *__A, __mmask8 __B) +{ + *(__mmask8 *) __A = __builtin_ia32_kmovb (__B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_knot_mask8 (__mmask8 __A) +{ + return (__mmask8) __builtin_ia32_knotqi ((__mmask8) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kor_mask8 (__mmask8 __A, __mmask8 __B) 
+{ + return (__mmask8) __builtin_ia32_korqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kxnor_mask8 (__mmask8 __A, __mmask8 __B) +{ + return (__mmask8) __builtin_ia32_kxnorqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kxor_mask8 (__mmask8 __A, __mmask8 __B) +{ + return (__mmask8) __builtin_ia32_kxorqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kand_mask8 (__mmask8 __A, __mmask8 __B) +{ + return (__mmask8) __builtin_ia32_kandqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kandn_mask8 (__mmask8 __A, __mmask8 __B) +{ + return (__mmask8) __builtin_ia32_kandnqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_f64x2 (__m128d __A) +{ + return (__m512d) + __builtin_ia32_broadcastf64x2_512_mask ((__v2df) __A, + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_f64x2 (__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) + __A, + (__v8df) + __O, __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x2_512_mask ((__v2df) + __A, + (__v8df) + _mm512_setzero_ps (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_i64x2 (__m128i __A) +{ + return (__m512i) + __builtin_ia32_broadcasti64x2_512_mask ((__v2di) __A, + 
_mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_i64x2 (__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) + __A, + (__v8di) + __O, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x2_512_mask ((__v2di) + __A, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_f32x2 (__m128 __A) +{ + return (__m512) + __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A, + (__v16sf)_mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A, + (__v16sf) + __O, __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x2_512_mask ((__v4sf) __A, + (__v16sf) + _mm512_setzero_ps (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_i32x2 (__m128i __A) +{ + return (__m512i) + __builtin_ia32_broadcasti32x2_512_mask ((__v4si) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) + __A, + (__v16si) + __O, __M); +} + +extern __inline __m512i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x2_512_mask ((__v4si) + __A, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_f32x8 (__m256 __A) +{ + return (__m512) + __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A, + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_f32x8 (__m512 __O, __mmask16 __M, __m256 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A, + (__v16sf)__O, + __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_f32x8 (__mmask16 __M, __m256 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x8_512_mask ((__v8sf) __A, + (__v16sf) + _mm512_setzero_ps (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_i32x8 (__m256i __A) +{ + return (__m512i) + __builtin_ia32_broadcasti32x8_512_mask ((__v8si) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_i32x8 (__m512i __O, __mmask16 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) + __A, + (__v16si)__O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_i32x8 (__mmask16 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x8_512_mask ((__v8si) + __A, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mullo_epi64 (__m512i __A, 
__m512i __B) +{ + return (__m512i) ((__v8du) __A * (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_xor_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B) +{ + return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_xor_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return 
(__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_or_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_or_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return 
(__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_and_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B) +{ + return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_and_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_andnot_pd (__m512d __A, __m512d __B) 
+{ + return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B) +{ + return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_andnot_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B) +{ + return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movepi32_mask (__m512i __A) +{ + return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movepi64_mask (__m512i __A) +{ + return (__mmask8) 
__builtin_ia32_cvtq2mask512 ((__v8di) __A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movm_epi32 (__mmask16 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2d512 (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movm_epi64 (__mmask8 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2q512 (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttpd_epi64 (__m512d __A) +{ + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttpd_epu64 (__m512d __A) +{ + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttps_epi64 (__m256 __A) +{ + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttps_epu64 (__m256 __A) +{ + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpd_epi64 (__m512d __A) +{ + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpd_epu64 (__m512d __A) +{ + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) +{ + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtps_epi64 (__m256 __A) +{ + return (__m512i) __builtin_ia32_cvtps2qq512_mask 
((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtps_epu64 (__m256 __A) +{ + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) +{ + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi64_ps (__m512i __A) +{ + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_ps 
(__m256 __W, __mmask8 __U, __m512i __A) +{ + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) +{ + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu64_ps (__m512i __A) +{ + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) +{ + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) +{ + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi64_pd (__m512i __A) +{ + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) +{ + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) +{ + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu64_pd (__m512i __A) +{ + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) +{ + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) +{ + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftli_mask8 (__mmask8 __A, unsigned int __B) +{ + return (__mmask8) __builtin_ia32_kshiftliqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftri_mask8 (__mmask8 __A, unsigned int __B) +{ + return (__mmask8) __builtin_ia32_kshiftriqi ((__mmask8) __A, (__mmask8) __B); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_range_pd (__m512d __A, __m512d __B, int __C) +{ + return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, + (__v8df) __B, __C, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_range_pd (__m512d __W, __mmask8 __U, + __m512d __A, __m512d __B, int __C) +{ + return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, + (__v8df) __B, __C, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_range_pd (__mmask8 __U, __m512d __A, __m512d __B, int __C) +{ + return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, + (__v8df) __B, __C, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_range_ps (__m512 __A, __m512 __B, int __C) +{ + return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, + (__v16sf) __B, __C, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_range_ps (__m512 __W, __mmask16 __U, + __m512 __A, __m512 __B, int __C) +{ + return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, + (__v16sf) __B, __C, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_range_ps (__mmask16 __U, __m512 __A, __m512 __B, int __C) +{ + return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, + (__v16sf) __B, __C, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_sd (__m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_reduce_round_sd (__m128d __A, __m128d __B, int __C, const int __R) +{ + return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, int __C, const int __R) +{ + return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) __W, + __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_reducesd_mask ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + int __C, const int __R) +{ + return (__m128d) __builtin_ia32_reducesd_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_ss (__m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_round_ss (__m128 __A, __m128 __B, int __C, const int __R) +{ + return (__m128) 
__builtin_ia32_reducess_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, int __C, const int __R) +{ + return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) __W, + __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_reducess_mask ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + int __C, const int __R) +{ + return (__m128) __builtin_ia32_reducess_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_range_sd (__m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_range_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) __W, + 
(__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_range_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_range_ss (__m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_range_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_range_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_range_round_sd (__m128d __A, __m128d __B, int __C, const int __R) +{ + return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_range_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + int __C, const int __R) +{ + return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A, + (__v2df) __B, __C, 
+ (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_range_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __C, + const int __R) +{ + return (__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_range_round_ss (__m128 __A, __m128 __B, int __C, const int __R) +{ + return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_range_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + int __C, const int __R) +{ + return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_range_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __C, + const int __R) +{ + return (__m128) __builtin_ia32_rangess128_mask_round ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fpclass_ss_mask (__m128 __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fpclass_sd_mask (__m128d __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fpclass_ss_mask (__mmask8 
__U, __m128 __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) __A, __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fpclass_sd_mask (__mmask8 __U, __m128d __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) __A, __imm, __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundpd_epi64 (__m512d __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundpd_epi64 (__mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundpd_epu64 (__m512d __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundpd_epu64 (__mmask8 
__U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundps_epi64 (__m256 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundps_epi64 (__mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundps_epu64 (__m256 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundps_epu64 (__mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundpd_epi64 (__m512d __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundpd_epi64 (__mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundpd_epu64 (__m512d __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundpd_epu64 (__mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundps_epi64 (__m256 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) + 
_mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundps_epi64 (__m512i __W, __mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundps_epi64 (__mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundps_epu64 (__m256 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundps_epu64 (__m512i __W, __mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundps_epu64 (__mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U, + __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepi64_ps (__m512i __A, const int __R) +{ + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1, + __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepi64_ps (__m256 __W, __mmask8 __U, __m512i __A, 
+ const int __R) +{ + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepi64_ps (__mmask8 __U, __m512i __A, + const int __R) +{ + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U, + __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepu64_ps (__m512i __A, const int __R) +{ + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1, + __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepu64_ps (__m256 __W, __mmask8 __U, __m512i __A, + const int __R) +{ + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepu64_ps (__mmask8 __U, __m512i __A, + const int __R) +{ + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepi64_pd (__m512i __A, const int __R) +{ + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepi64_pd (__m512d __W, __mmask8 __U, __m512i __A, + const int __R) +{ + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_cvt_roundepi64_pd (__mmask8 __U, __m512i __A, + const int __R) +{ + return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepu64_pd (__m512i __A, const int __R) +{ + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepu64_pd (__m512d __W, __mmask8 __U, __m512i __A, + const int __R) +{ + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepu64_pd (__mmask8 __U, __m512i __A, + const int __R) +{ + return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_pd (__m512d __A, int __B) +{ + return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_round_pd (__m512d __A, int __B, const int __R) +{ + return (__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df) __A, + __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_pd (__m512d __W, __mmask8 __U, __m512d __A, int __B) +{ + return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + int __B, const int __R) +{ + return (__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df) __A, + __B, + (__v8df) __W, + __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_reduce_pd (__mmask8 __U, __m512d __A, int __B) +{ + return (__m512d) __builtin_ia32_reducepd512_mask ((__v8df) __A, __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_reduce_round_pd (__mmask8 __U, __m512d __A, int __B, + const int __R) +{ + return (__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df) __A, + __B, + (__v8df) + _mm512_setzero_pd (), + __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_ps (__m512 __A, int __B) +{ + return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_round_ps (__m512 __A, int __B, const int __R) +{ + return (__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf) __A, + __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_ps (__m512 __W, __mmask16 __U, __m512 __A, int __B) +{ + return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_round_ps (__m512 __W, __mmask16 __U, __m512 __A, int __B, + const int __R) +{ + return (__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf) __A, + __B, + (__v16sf) 
__W, + __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_reduce_ps (__mmask16 __U, __m512 __A, int __B) +{ + return (__m512) __builtin_ia32_reduceps512_mask ((__v16sf) __A, __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_reduce_round_ps (__mmask16 __U, __m512 __A, int __B, + const int __R) +{ + return (__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf) __A, + __B, + (__v16sf) + _mm512_setzero_ps (), + __U, __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extractf32x8_ps (__m512 __A, const int __imm) +{ + return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extractf32x8_ps (__m256 __W, __mmask8 __U, __m512 __A, + const int __imm) +{ + return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A, + __imm, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extractf32x8_ps (__mmask8 __U, __m512 __A, + const int __imm) +{ + return (__m256) __builtin_ia32_extractf32x8_mask ((__v16sf) __A, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extractf64x2_pd (__m512d __A, const int __imm) +{ + return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A, + __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extractf64x2_pd (__m128d __W, __mmask8 __U, __m512d __A, + const int __imm) +{ + return 
(__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A, + __imm, + (__v2df) __W, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extractf64x2_pd (__mmask8 __U, __m512d __A, + const int __imm) +{ + return (__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df) __A, + __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extracti32x8_epi32 (__m512i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A, + __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extracti32x8_epi32 (__m256i __W, __mmask8 __U, __m512i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A, + __imm, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extracti32x8_epi32 (__mmask8 __U, __m512i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_extracti32x8_mask ((__v16si) __A, + __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extracti64x2_epi64 (__m512i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A, + __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extracti64x2_epi64 (__m128i __W, __mmask8 __U, __m512i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A, + __imm, + (__v2di) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extracti64x2_epi64 (__mmask8 __U, __m512i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di) __A, + __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_range_round_pd (__m512d __A, __m512d __B, int __C, + const int __R) +{ + return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, + (__v8df) __B, __C, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_range_round_pd (__m512d __W, __mmask8 __U, + __m512d __A, __m512d __B, int __C, + const int __R) +{ + return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, + (__v8df) __B, __C, + (__v8df) __W, + (__mmask8) __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_range_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + int __C, const int __R) +{ + return (__m512d) __builtin_ia32_rangepd512_mask ((__v8df) __A, + (__v8df) __B, __C, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_range_round_ps (__m512 __A, __m512 __B, int __C, const int __R) +{ + return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, + (__v16sf) __B, __C, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1, + __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_range_round_ps (__m512 __W, __mmask16 __U, + __m512 __A, __m512 __B, int __C, + const int __R) +{ + return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, + (__v16sf) __B, __C, + (__v16sf) __W, + (__mmask16) __U, + __R); +} + +extern __inline __m512 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_range_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + int __C, const int __R) +{ + return (__m512) __builtin_ia32_rangeps512_mask ((__v16sf) __A, + (__v16sf) __B, __C, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_inserti32x8 (__m512i __A, __m256i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A, + (__v8si) __B, + __imm, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_inserti32x8 (__m512i __W, __mmask16 __U, __m512i __A, + __m256i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A, + (__v8si) __B, + __imm, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_inserti32x8 (__mmask16 __U, __m512i __A, __m256i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_inserti32x8_mask ((__v16si) __A, + (__v8si) __B, + __imm, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_insertf32x8 (__m512 __A, __m256 __B, const int __imm) +{ + return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A, + (__v8sf) __B, + __imm, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_insertf32x8 (__m512 __W, __mmask16 __U, __m512 __A, + __m256 __B, const int __imm) +{ + return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A, + (__v8sf) __B, + __imm, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_insertf32x8 (__mmask16 __U, __m512 __A, __m256 __B, + const int __imm) +{ + return (__m512) __builtin_ia32_insertf32x8_mask ((__v16sf) __A, + (__v8sf) __B, + __imm, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_inserti64x2 (__m512i __A, __m128i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A, + (__v2di) __B, + __imm, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_inserti64x2 (__m512i __W, __mmask8 __U, __m512i __A, + __m128i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A, + (__v2di) __B, + __imm, + (__v8di) __W, + (__mmask8) + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_inserti64x2 (__mmask8 __U, __m512i __A, __m128i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di) __A, + (__v2di) __B, + __imm, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) + __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_insertf64x2 (__m512d __A, __m128d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A, + (__v2df) __B, + __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_insertf64x2 (__m512d __W, __mmask8 __U, __m512d __A, + __m128d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A, + (__v2df) __B, + __imm, + (__v8df) __W, + (__mmask8) + __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_maskz_insertf64x2 (__mmask8 __U, __m512d __A, __m128d __B, + const int __imm) +{ + return (__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df) __A, + (__v2df) __B, + __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) + __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fpclass_pd_mask (__mmask8 __U, __m512d __A, + const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) __A, + __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fpclass_pd_mask (__m512d __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) __A, + __imm, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fpclass_ps_mask (__mmask16 __U, __m512 __A, + const int __imm) +{ + return (__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) __A, + __imm, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fpclass_ps_mask (__m512 __A, const int __imm) +{ + return (__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) __A, + __imm, + (__mmask16) -1); +} + +#else +#define _kshiftli_mask8(X, Y) \ + ((__mmask8) __builtin_ia32_kshiftliqi ((__mmask8)(X), (__mmask8)(Y))) + +#define _kshiftri_mask8(X, Y) \ + ((__mmask8) __builtin_ia32_kshiftriqi ((__mmask8)(X), (__mmask8)(Y))) + +#define _mm_range_sd(A, B, C) \ + ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \ + (__mmask8) -1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_range_sd(W, U, A, B, C) \ + ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_range_sd(U, A, B, C) 
\ + ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_range_ss(A, B, C) \ + ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8) -1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_range_ss(W, U, A, B, C) \ + ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_range_ss(U, A, B, C) \ + ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_range_round_sd(A, B, C, R) \ + ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \ + (__mmask8) -1, (R))) + +#define _mm_mask_range_round_sd(W, U, A, B, C, R) \ + ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \ + (__mmask8)(U), (R))) + +#define _mm_maskz_range_round_sd(U, A, B, C, R) \ + ((__m128d) __builtin_ia32_rangesd128_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \ + (__mmask8)(U), (R))) + +#define _mm_range_round_ss(A, B, C, R) \ + ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8) -1, (R))) + +#define _mm_mask_range_round_ss(W, U, A, B, C, R) \ + ((__m128) __builtin_ia32_rangess128_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \ + (__mmask8)(U), (R))) + +#define _mm_maskz_range_round_ss(U, A, B, C, R) \ + ((__m128) __builtin_ia32_rangess128_mask_round 
((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8)(U), (R))) + +#define _mm512_cvtt_roundpd_epi64(A, B) \ + ((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di) \ + _mm512_setzero_si512 (), \ + -1, (B))) + +#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvtt_roundpd_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_cvttpd2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvtt_roundpd_epu64(A, B) \ + ((__m512i)__builtin_ia32_cvttpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvttpd2uqq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvtt_roundpd_epu64(U, A, B) \ + ((__m512i)__builtin_ia32_cvttpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvtt_roundps_epi64(A, B) \ + ((__m512i)__builtin_ia32_cvttps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvtt_roundps_epi64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2qq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvtt_roundps_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvtt_roundps_epu64(A, B) \ + ((__m512i)__builtin_ia32_cvttps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvtt_roundps_epu64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2uqq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvtt_roundps_epu64(U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvt_roundpd_epi64(A, B) \ + ((__m512i)__builtin_ia32_cvtpd2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvt_roundpd_epi64(W, U, A, B) \ + 
((__m512i)__builtin_ia32_cvtpd2qq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvt_roundpd_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_cvtpd2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvt_roundpd_epu64(A, B) \ + ((__m512i)__builtin_ia32_cvtpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvt_roundpd_epu64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvtpd2uqq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvt_roundpd_epu64(U, A, B) \ + ((__m512i)__builtin_ia32_cvtpd2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvt_roundps_epi64(A, B) \ + ((__m512i)__builtin_ia32_cvtps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvt_roundps_epi64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2qq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvt_roundps_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2qq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvt_roundps_epu64(A, B) \ + ((__m512i)__builtin_ia32_cvtps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), -1, (B))) + +#define _mm512_mask_cvt_roundps_epu64(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2uqq512_mask ((A), (__v8di)(W), (U), (B))) + +#define _mm512_maskz_cvt_roundps_epu64(U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2uqq512_mask ((A), (__v8di)_mm512_setzero_si512 (), (U), (B))) + +#define _mm512_cvt_roundepi64_ps(A, B) \ + ((__m256)__builtin_ia32_cvtqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), -1, (B))) + +#define _mm512_mask_cvt_roundepi64_ps(W, U, A, B) \ + ((__m256)__builtin_ia32_cvtqq2ps512_mask ((__v8di)(A), (W), (U), (B))) + +#define _mm512_maskz_cvt_roundepi64_ps(U, A, B) \ + ((__m256)__builtin_ia32_cvtqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), (U), (B))) + +#define _mm512_cvt_roundepu64_ps(A, B) \ + ((__m256)__builtin_ia32_cvtuqq2ps512_mask ((__v8di)(A), 
(__v8sf)_mm256_setzero_ps (), -1, (B))) + +#define _mm512_mask_cvt_roundepu64_ps(W, U, A, B) \ + ((__m256)__builtin_ia32_cvtuqq2ps512_mask ((__v8di)(A), (W), (U), (B))) + +#define _mm512_maskz_cvt_roundepu64_ps(U, A, B) \ + ((__m256)__builtin_ia32_cvtuqq2ps512_mask ((__v8di)(A), (__v8sf)_mm256_setzero_ps (), (U), (B))) + +#define _mm512_cvt_roundepi64_pd(A, B) \ + ((__m512d)__builtin_ia32_cvtqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), -1, (B))) + +#define _mm512_mask_cvt_roundepi64_pd(W, U, A, B) \ + ((__m512d)__builtin_ia32_cvtqq2pd512_mask ((__v8di)(A), (W), (U), (B))) + +#define _mm512_maskz_cvt_roundepi64_pd(U, A, B) \ + ((__m512d)__builtin_ia32_cvtqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), (U), (B))) + +#define _mm512_cvt_roundepu64_pd(A, B) \ + ((__m512d)__builtin_ia32_cvtuqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), -1, (B))) + +#define _mm512_mask_cvt_roundepu64_pd(W, U, A, B) \ + ((__m512d)__builtin_ia32_cvtuqq2pd512_mask ((__v8di)(A), (W), (U), (B))) + +#define _mm512_maskz_cvt_roundepu64_pd(U, A, B) \ + ((__m512d)__builtin_ia32_cvtuqq2pd512_mask ((__v8di)(A), (__v8df)_mm512_setzero_pd (), (U), (B))) + +#define _mm512_reduce_pd(A, B) \ + ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A), \ + (int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)-1)) + +#define _mm512_reduce_round_pd(A, B, R) \ + ((__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df)(__m512d)(A),\ + (int)(B), (__v8df)_mm512_setzero_pd (), (__mmask8)-1, (R))) + +#define _mm512_mask_reduce_pd(W, U, A, B) \ + ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A), \ + (int)(B), (__v8df)(__m512d)(W), (__mmask8)(U))) + +#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \ + ((__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df)(__m512d)(A),\ + (int)(B), (__v8df)(__m512d)(W), (U), (R))) + +#define _mm512_maskz_reduce_pd(U, A, B) \ + ((__m512d) __builtin_ia32_reducepd512_mask ((__v8df)(__m512d)(A), \ + (int)(B), 
(__v8df)_mm512_setzero_pd (), (__mmask8)(U))) + +#define _mm512_maskz_reduce_round_pd(U, A, B, R) \ + ((__m512d) __builtin_ia32_reducepd512_mask_round ((__v8df)(__m512d)(A),\ + (int)(B), (__v8df)_mm512_setzero_pd (), (U), (R))) + +#define _mm512_reduce_ps(A, B) \ + ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A), \ + (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)-1)) + +#define _mm512_reduce_round_ps(A, B, R) \ + ((__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf)(__m512)(A),\ + (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)-1, (R))) + +#define _mm512_mask_reduce_ps(W, U, A, B) \ + ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A), \ + (int)(B), (__v16sf)(__m512)(W), (__mmask16)(U))) + +#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \ + ((__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf)(__m512)(A),\ + (int)(B), (__v16sf)(__m512)(W), (U), (R))) + +#define _mm512_maskz_reduce_ps(U, A, B) \ + ((__m512) __builtin_ia32_reduceps512_mask ((__v16sf)(__m512)(A), \ + (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)(U))) + +#define _mm512_maskz_reduce_round_ps(U, A, B, R) \ + ((__m512) __builtin_ia32_reduceps512_mask_round ((__v16sf)(__m512)(A),\ + (int)(B), (__v16sf)_mm512_setzero_ps (), (__mmask16)(U), (R))) + +#define _mm512_extractf32x8_ps(X, C) \ + ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X), \ + (int) (C), (__v8sf)(__m256) _mm256_setzero_ps (), (__mmask8)-1)) + +#define _mm512_mask_extractf32x8_ps(W, U, X, C) \ + ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X), \ + (int) (C), (__v8sf)(__m256) (W), (__mmask8) (U))) + +#define _mm512_maskz_extractf32x8_ps(U, X, C) \ + ((__m256) __builtin_ia32_extractf32x8_mask ((__v16sf)(__m512) (X), \ + (int) (C), (__v8sf)(__m256) _mm256_setzero_ps (), (__mmask8) (U))) + +#define _mm512_extractf64x2_pd(X, C) \ + ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\ + (int) (C), (__v2df)(__m128d) _mm_setzero_pd (), 
(__mmask8)-1)) + +#define _mm512_mask_extractf64x2_pd(W, U, X, C) \ + ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\ + (int) (C), (__v2df)(__m128d) (W), (__mmask8) (U))) + +#define _mm512_maskz_extractf64x2_pd(U, X, C) \ + ((__m128d) __builtin_ia32_extractf64x2_512_mask ((__v8df)(__m512d) (X),\ + (int) (C), (__v2df)(__m128d) _mm_setzero_pd (), (__mmask8) (U))) + +#define _mm512_extracti32x8_epi32(X, C) \ + ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X), \ + (int) (C), (__v8si)(__m256i) _mm256_setzero_si256 (), (__mmask8)-1)) + +#define _mm512_mask_extracti32x8_epi32(W, U, X, C) \ + ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X), \ + (int) (C), (__v8si)(__m256i) (W), (__mmask8) (U))) + +#define _mm512_maskz_extracti32x8_epi32(U, X, C) \ + ((__m256i) __builtin_ia32_extracti32x8_mask ((__v16si)(__m512i) (X), \ + (int) (C), (__v8si)(__m256i) _mm256_setzero_si256 (), (__mmask8) (U))) + +#define _mm512_extracti64x2_epi64(X, C) \ + ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\ + (int) (C), (__v2di)(__m128i) _mm_setzero_si128 (), (__mmask8)-1)) + +#define _mm512_mask_extracti64x2_epi64(W, U, X, C) \ + ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\ + (int) (C), (__v2di)(__m128i) (W), (__mmask8) (U))) + +#define _mm512_maskz_extracti64x2_epi64(U, X, C) \ + ((__m128i) __builtin_ia32_extracti64x2_512_mask ((__v8di)(__m512i) (X),\ + (int) (C), (__v2di)(__m128i) _mm_setzero_si128 (), (__mmask8) (U))) + +#define _mm512_range_pd(A, B, C) \ + ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd (), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_range_pd(W, U, A, B, C) \ + ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define 
_mm512_maskz_range_pd(U, A, B, C) \ + ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd (), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_range_ps(A, B, C) \ + ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps (), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_range_ps(W, U, A, B, C) \ + ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_range_ps(U, A, B, C) \ + ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps (), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_range_round_pd(A, B, C, R) \ + ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd (), (__mmask8)-1, (R))) + +#define _mm512_mask_range_round_pd(W, U, A, B, C, R) \ + ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), (R))) + +#define _mm512_maskz_range_round_pd(U, A, B, C, R) \ + ((__m512d) __builtin_ia32_rangepd512_mask ((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd (), (__mmask8)(U), (R))) + +#define _mm512_range_round_ps(A, B, C, R) \ + ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps (), (__mmask16)-1, (R))) + +#define _mm512_mask_range_round_ps(W, U, A, B, C, R) \ + ((__m512) __builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), (R))) + +#define _mm512_maskz_range_round_ps(U, A, B, C, R) \ + ((__m512) 
__builtin_ia32_rangeps512_mask ((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps (), (__mmask16)(U), (R))) + +#define _mm512_insertf64x2(X, Y, C) \ + ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\ + (__v2df)(__m128d) (Y), (int) (C), (__v8df)(__m512d) (X), \ + (__mmask8)-1)) + +#define _mm512_mask_insertf64x2(W, U, X, Y, C) \ + ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\ + (__v2df)(__m128d) (Y), (int) (C), (__v8df)(__m512d) (W), \ + (__mmask8) (U))) + +#define _mm512_maskz_insertf64x2(U, X, Y, C) \ + ((__m512d) __builtin_ia32_insertf64x2_512_mask ((__v8df)(__m512d) (X),\ + (__v2df)(__m128d) (Y), (int) (C), \ + (__v8df)(__m512d) _mm512_setzero_pd (), (__mmask8) (U))) + +#define _mm512_inserti64x2(X, Y, C) \ + ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\ + (__v2di)(__m128i) (Y), (int) (C), (__v8di)(__m512i) (X), (__mmask8)-1)) + +#define _mm512_mask_inserti64x2(W, U, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\ + (__v2di)(__m128i) (Y), (int) (C), (__v8di)(__m512i) (W), \ + (__mmask8) (U))) + +#define _mm512_maskz_inserti64x2(U, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti64x2_512_mask ((__v8di)(__m512i) (X),\ + (__v2di)(__m128i) (Y), (int) (C), \ + (__v8di)(__m512i) _mm512_setzero_si512 (), (__mmask8) (U))) + +#define _mm512_insertf32x8(X, Y, C) \ + ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X), \ + (__v8sf)(__m256) (Y), (int) (C),\ + (__v16sf)(__m512)_mm512_setzero_ps (),\ + (__mmask16)-1)) + +#define _mm512_mask_insertf32x8(W, U, X, Y, C) \ + ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X), \ + (__v8sf)(__m256) (Y), (int) (C),\ + (__v16sf)(__m512)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_insertf32x8(U, X, Y, C) \ + ((__m512) __builtin_ia32_insertf32x8_mask ((__v16sf)(__m512) (X), \ + (__v8sf)(__m256) (Y), (int) (C),\ + (__v16sf)(__m512)_mm512_setzero_ps (),\ + 
(__mmask16)(U))) + +#define _mm512_inserti32x8(X, Y, C) \ + ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X), \ + (__v8si)(__m256i) (Y), (int) (C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)-1)) + +#define _mm512_mask_inserti32x8(W, U, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X), \ + (__v8si)(__m256i) (Y), (int) (C),\ + (__v16si)(__m512i)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_inserti32x8(U, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti32x8_mask ((__v16si)(__m512i) (X), \ + (__v8si)(__m256i) (Y), (int) (C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)(U))) + +#define _mm_fpclass_ss_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \ + (int) (C), (__mmask8) (-1))) \ + +#define _mm_fpclass_sd_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \ + (int) (C), (__mmask8) (-1))) \ + +#define _mm_mask_fpclass_ss_mask(X, C, U) \ + ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X), \ + (int) (C), (__mmask8) (U))) + +#define _mm_mask_fpclass_sd_mask(X, C, U) \ + ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X), \ + (int) (C), (__mmask8) (U))) + +#define _mm512_mask_fpclass_pd_mask(u, X, C) \ + ((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \ + (int) (C), (__mmask8)(u))) + +#define _mm512_mask_fpclass_ps_mask(u, x, c) \ + ((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (x),\ + (int) (c),(__mmask16)(u))) + +#define _mm512_fpclass_pd_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclasspd512_mask ((__v8df) (__m512d) (X), \ + (int) (C), (__mmask8)-1)) + +#define _mm512_fpclass_ps_mask(x, c) \ + ((__mmask16) __builtin_ia32_fpclassps512_mask ((__v16sf) (__m512) (x),\ + (int) (c),(__mmask16)-1)) + +#define _mm_reduce_sd(A, B, C) \ + ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), 
\ + (__mmask8)-1)) + +#define _mm_mask_reduce_sd(W, U, A, B, C) \ + ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), (__mmask8)(U))) + +#define _mm_maskz_reduce_sd(U, A, B, C) \ + ((__m128d) __builtin_ia32_reducesd_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \ + (__mmask8)(U))) + +#define _mm_reduce_round_sd(A, B, C, R) \ + ((__m128d) __builtin_ia32_reducesd_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__mmask8)(U), (int)(R))) + +#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \ + ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \ + ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (), \ + (__mmask8)(U), (int)(R))) + +#define _mm_reduce_ss(A, B, C) \ + ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8)-1)) + +#define _mm_mask_reduce_ss(W, U, A, B, C) \ + ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), (__mmask8)(U))) + +#define _mm_maskz_reduce_ss(U, A, B, C) \ + ((__m128) __builtin_ia32_reducess_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm_reduce_round_ss(A, B, C, R) \ + ((__m128) __builtin_ia32_reducess_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__mmask8)(U), (int)(R))) + +#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \ + ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \ + 
((__m128) __builtin_ia32_reducesd_mask_round ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (), \ + (__mmask8)(U), (int)(R))) + + +#endif + +#ifdef __DISABLE_AVX512DQ__ +#undef __DISABLE_AVX512DQ__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512DQ__ */ + +#endif /* _AVX512DQINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512erintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512erintrin.h new file mode 100644 index 0000000..6b3b679 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512erintrin.h @@ -0,0 +1,542 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512ERINTRIN_H_INCLUDED +#define _AVX512ERINTRIN_H_INCLUDED + +#ifndef __AVX512ER__ +#pragma GCC push_options +#pragma GCC target("avx512er") +#define __DISABLE_AVX512ER__ +#endif /* __AVX512ER__ */ + +/* Internal data types for implementing the intrinsics. 
*/ +typedef double __v8df __attribute__ ((__vector_size__ (64))); +typedef float __v16sf __attribute__ ((__vector_size__ (64))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m512 __attribute__ ((__vector_size__ (64), __may_alias__)); +typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__)); + +typedef unsigned char __mmask8; +typedef unsigned short __mmask16; + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_exp2a23_round_pd (__m512d __A, int __R) +{ + __m512d __W; + return (__m512d) __builtin_ia32_exp2pd_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_exp2a23_round_pd (__m512d __W, __mmask8 __U, __m512d __A, int __R) +{ + return (__m512d) __builtin_ia32_exp2pd_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_exp2a23_round_pd (__mmask8 __U, __m512d __A, int __R) +{ + return (__m512d) __builtin_ia32_exp2pd_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_exp2a23_round_ps (__m512 __A, int __R) +{ + __m512 __W; + return (__m512) __builtin_ia32_exp2ps_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_exp2a23_round_ps (__m512 __W, __mmask16 __U, __m512 __A, int __R) +{ + return (__m512) __builtin_ia32_exp2ps_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_exp2a23_round_ps (__mmask16 __U, __m512 __A, int __R) +{ + return (__m512) __builtin_ia32_exp2ps_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rcp28_round_pd (__m512d __A, int __R) +{ + __m512d __W; + return (__m512d) __builtin_ia32_rcp28pd_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rcp28_round_pd (__m512d __W, __mmask8 __U, __m512d __A, int __R) +{ + return (__m512d) __builtin_ia32_rcp28pd_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rcp28_round_pd (__mmask8 __U, __m512d __A, int __R) +{ + return (__m512d) __builtin_ia32_rcp28pd_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rcp28_round_ps (__m512 __A, int __R) +{ + __m512 __W; + return (__m512) __builtin_ia32_rcp28ps_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rcp28_round_ps (__m512 __W, __mmask16 __U, __m512 __A, int __R) +{ + return (__m512) __builtin_ia32_rcp28ps_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rcp28_round_ps (__mmask16 __U, __m512 __A, int __R) +{ + return (__m512) __builtin_ia32_rcp28ps_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp28_round_sd (__m128d __A, __m128d 
__B, int __R) +{ + return (__m128d) __builtin_ia32_rcp28sd_round ((__v2df) __B, + (__v2df) __A, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp28_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, int __R) +{ + return (__m128d) __builtin_ia32_rcp28sd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) __W, + __U, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp28_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int __R) +{ + return (__m128d) __builtin_ia32_rcp28sd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) + _mm_setzero_pd (), + __U, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp28_round_ss (__m128 __A, __m128 __B, int __R) +{ + return (__m128) __builtin_ia32_rcp28ss_round ((__v4sf) __B, + (__v4sf) __A, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp28_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, int __R) +{ + return (__m128) __builtin_ia32_rcp28ss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) __W, + __U, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp28_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __R) +{ + return (__m128) __builtin_ia32_rcp28ss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rsqrt28_round_pd (__m512d __A, int __R) +{ + __m512d __W; + return (__m512d) __builtin_ia32_rsqrt28pd_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rsqrt28_round_pd (__m512d __W, __mmask8 __U, 
__m512d __A, int __R) +{ + return (__m512d) __builtin_ia32_rsqrt28pd_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rsqrt28_round_pd (__mmask8 __U, __m512d __A, int __R) +{ + return (__m512d) __builtin_ia32_rsqrt28pd_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rsqrt28_round_ps (__m512 __A, int __R) +{ + __m512 __W; + return (__m512) __builtin_ia32_rsqrt28ps_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rsqrt28_round_ps (__m512 __W, __mmask16 __U, __m512 __A, int __R) +{ + return (__m512) __builtin_ia32_rsqrt28ps_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rsqrt28_round_ps (__mmask16 __U, __m512 __A, int __R) +{ + return (__m512) __builtin_ia32_rsqrt28ps_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt28_round_sd (__m128d __A, __m128d __B, int __R) +{ + return (__m128d) __builtin_ia32_rsqrt28sd_round ((__v2df) __B, + (__v2df) __A, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt28_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, int __R) +{ + return (__m128d) __builtin_ia32_rsqrt28sd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) __W, + __U, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt28_round_sd (__mmask8 __U, __m128d __A, __m128d __B, int 
__R) +{ + return (__m128d) __builtin_ia32_rsqrt28sd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) + _mm_setzero_pd (), + __U, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt28_round_ss (__m128 __A, __m128 __B, int __R) +{ + return (__m128) __builtin_ia32_rsqrt28ss_round ((__v4sf) __B, + (__v4sf) __A, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt28_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, int __R) +{ + return (__m128) __builtin_ia32_rsqrt28ss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) __W, + __U, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt28_round_ss (__mmask8 __U, __m128 __A, __m128 __B, int __R) +{ + return (__m128) __builtin_ia32_rsqrt28ss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + __U, + __R); +} + +#else +#define _mm512_exp2a23_round_pd(A, C) \ + __builtin_ia32_exp2pd_mask(A, (__v8df)_mm512_setzero_pd(), -1, C) + +#define _mm512_mask_exp2a23_round_pd(W, U, A, C) \ + __builtin_ia32_exp2pd_mask(A, W, U, C) + +#define _mm512_maskz_exp2a23_round_pd(U, A, C) \ + __builtin_ia32_exp2pd_mask(A, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_exp2a23_round_ps(A, C) \ + __builtin_ia32_exp2ps_mask(A, (__v16sf)_mm512_setzero_ps(), -1, C) + +#define _mm512_mask_exp2a23_round_ps(W, U, A, C) \ + __builtin_ia32_exp2ps_mask(A, W, U, C) + +#define _mm512_maskz_exp2a23_round_ps(U, A, C) \ + __builtin_ia32_exp2ps_mask(A, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm512_rcp28_round_pd(A, C) \ + __builtin_ia32_rcp28pd_mask(A, (__v8df)_mm512_setzero_pd(), -1, C) + +#define _mm512_mask_rcp28_round_pd(W, U, A, C) \ + __builtin_ia32_rcp28pd_mask(A, W, U, C) + +#define _mm512_maskz_rcp28_round_pd(U, A, C) \ + __builtin_ia32_rcp28pd_mask(A, (__v8df)_mm512_setzero_pd(), U, C) + +#define 
_mm512_rcp28_round_ps(A, C) \ + __builtin_ia32_rcp28ps_mask(A, (__v16sf)_mm512_setzero_ps(), -1, C) + +#define _mm512_mask_rcp28_round_ps(W, U, A, C) \ + __builtin_ia32_rcp28ps_mask(A, W, U, C) + +#define _mm512_maskz_rcp28_round_ps(U, A, C) \ + __builtin_ia32_rcp28ps_mask(A, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm512_rsqrt28_round_pd(A, C) \ + __builtin_ia32_rsqrt28pd_mask(A, (__v8df)_mm512_setzero_pd(), -1, C) + +#define _mm512_mask_rsqrt28_round_pd(W, U, A, C) \ + __builtin_ia32_rsqrt28pd_mask(A, W, U, C) + +#define _mm512_maskz_rsqrt28_round_pd(U, A, C) \ + __builtin_ia32_rsqrt28pd_mask(A, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_rsqrt28_round_ps(A, C) \ + __builtin_ia32_rsqrt28ps_mask(A, (__v16sf)_mm512_setzero_ps(), -1, C) + +#define _mm512_mask_rsqrt28_round_ps(W, U, A, C) \ + __builtin_ia32_rsqrt28ps_mask(A, W, U, C) + +#define _mm512_maskz_rsqrt28_round_ps(U, A, C) \ + __builtin_ia32_rsqrt28ps_mask(A, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm_rcp28_round_sd(A, B, R) \ + __builtin_ia32_rcp28sd_round(A, B, R) + +#define _mm_mask_rcp28_round_sd(W, U, A, B, R) \ + __builtin_ia32_rcp28sd_mask_round ((A), (B), (W), (U), (R)) + +#define _mm_maskz_rcp28_round_sd(U, A, B, R) \ + __builtin_ia32_rcp28sd_mask_round ((A), (B), (__v2df) _mm_setzero_pd (), \ + (U), (R)) + +#define _mm_rcp28_round_ss(A, B, R) \ + __builtin_ia32_rcp28ss_round(A, B, R) + +#define _mm_mask_rcp28_round_ss(W, U, A, B, R) \ + __builtin_ia32_rcp28ss_mask_round ((A), (B), (W), (U), (R)) + +#define _mm_maskz_rcp28_round_ss(U, A, B, R) \ + __builtin_ia32_rcp28ss_mask_round ((A), (B), (__v4sf) _mm_setzero_ps (), \ + (U), (R)) + +#define _mm_rsqrt28_round_sd(A, B, R) \ + __builtin_ia32_rsqrt28sd_round(A, B, R) + +#define _mm_mask_rsqrt28_round_sd(W, U, A, B, R) \ + __builtin_ia32_rsqrt28sd_mask_round ((A), (B), (W), (U), (R)) + +#define _mm_maskz_rsqrt28_round_sd(U, A, B, R) \ + __builtin_ia32_rsqrt28sd_mask_round ((A), (B), (__v2df) _mm_setzero_pd (),\ + (U), (R)) + 
+#define _mm_rsqrt28_round_ss(A, B, R) \ + __builtin_ia32_rsqrt28ss_round(A, B, R) + +#define _mm_mask_rsqrt28_round_ss(W, U, A, B, R) \ + __builtin_ia32_rsqrt28ss_mask_round ((A), (B), (W), (U), (R)) + +#define _mm_maskz_rsqrt28_round_ss(U, A, B, R) \ + __builtin_ia32_rsqrt28ss_mask_round ((A), (B), (__v4sf) _mm_setzero_ps (),\ + (U), (R)) + +#endif + +#define _mm_mask_rcp28_sd(W, U, A, B)\ + _mm_mask_rcp28_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rcp28_sd(U, A, B)\ + _mm_maskz_rcp28_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rcp28_ss(W, U, A, B)\ + _mm_mask_rcp28_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rcp28_ss(U, A, B)\ + _mm_maskz_rcp28_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rsqrt28_sd(W, U, A, B)\ + _mm_mask_rsqrt28_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rsqrt28_sd(U, A, B)\ + _mm_maskz_rsqrt28_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rsqrt28_ss(W, U, A, B)\ + _mm_mask_rsqrt28_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rsqrt28_ss(U, A, B)\ + _mm_maskz_rsqrt28_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_exp2a23_pd(A) \ + _mm512_exp2a23_round_pd(A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_exp2a23_pd(W, U, A) \ + _mm512_mask_exp2a23_round_pd(W, U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_exp2a23_pd(U, A) \ + _mm512_maskz_exp2a23_round_pd(U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_exp2a23_ps(A) \ + _mm512_exp2a23_round_ps(A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_exp2a23_ps(W, U, A) \ + _mm512_mask_exp2a23_round_ps(W, U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_exp2a23_ps(U, A) \ + _mm512_maskz_exp2a23_round_ps(U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rcp28_pd(A) \ + _mm512_rcp28_round_pd(A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rcp28_pd(W, 
U, A) \ + _mm512_mask_rcp28_round_pd(W, U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rcp28_pd(U, A) \ + _mm512_maskz_rcp28_round_pd(U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rcp28_ps(A) \ + _mm512_rcp28_round_ps(A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rcp28_ps(W, U, A) \ + _mm512_mask_rcp28_round_ps(W, U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rcp28_ps(U, A) \ + _mm512_maskz_rcp28_round_ps(U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rsqrt28_pd(A) \ + _mm512_rsqrt28_round_pd(A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rsqrt28_pd(W, U, A) \ + _mm512_mask_rsqrt28_round_pd(W, U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rsqrt28_pd(U, A) \ + _mm512_maskz_rsqrt28_round_pd(U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rsqrt28_ps(A) \ + _mm512_rsqrt28_round_ps(A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rsqrt28_ps(W, U, A) \ + _mm512_mask_rsqrt28_round_ps(W, U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rsqrt28_ps(U, A) \ + _mm512_maskz_rsqrt28_round_ps(U, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm_rcp28_sd(A, B) \ + __builtin_ia32_rcp28sd_round(B, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm_rcp28_ss(A, B) \ + __builtin_ia32_rcp28ss_round(B, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm_rsqrt28_sd(A, B) \ + __builtin_ia32_rsqrt28sd_round(B, A, _MM_FROUND_CUR_DIRECTION) + +#define _mm_rsqrt28_ss(A, B) \ + __builtin_ia32_rsqrt28ss_round(B, A, _MM_FROUND_CUR_DIRECTION) + +#ifdef __DISABLE_AVX512ER__ +#undef __DISABLE_AVX512ER__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512ER__ */ + +#endif /* _AVX512ERINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fintrin.h new file mode 100644 index 0000000..77d6249 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fintrin.h @@ -0,0 +1,16474 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512FINTRIN_H_INCLUDED +#define _AVX512FINTRIN_H_INCLUDED + +#ifndef __AVX512F__ +#pragma GCC push_options +#pragma GCC target("avx512f") +#define __DISABLE_AVX512F__ +#endif /* __AVX512F__ */ + +/* Internal data types for implementing the intrinsics. 
*/ +typedef double __v8df __attribute__ ((__vector_size__ (64))); +typedef float __v16sf __attribute__ ((__vector_size__ (64))); +typedef long long __v8di __attribute__ ((__vector_size__ (64))); +typedef unsigned long long __v8du __attribute__ ((__vector_size__ (64))); +typedef int __v16si __attribute__ ((__vector_size__ (64))); +typedef unsigned int __v16su __attribute__ ((__vector_size__ (64))); +typedef short __v32hi __attribute__ ((__vector_size__ (64))); +typedef unsigned short __v32hu __attribute__ ((__vector_size__ (64))); +typedef char __v64qi __attribute__ ((__vector_size__ (64))); +typedef unsigned char __v64qu __attribute__ ((__vector_size__ (64))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m512 __attribute__ ((__vector_size__ (64), __may_alias__)); +typedef long long __m512i __attribute__ ((__vector_size__ (64), __may_alias__)); +typedef double __m512d __attribute__ ((__vector_size__ (64), __may_alias__)); + +/* Unaligned version of the same type. 
*/ +typedef float __m512_u __attribute__ ((__vector_size__ (64), __may_alias__, __aligned__ (1))); +typedef long long __m512i_u __attribute__ ((__vector_size__ (64), __may_alias__, __aligned__ (1))); +typedef double __m512d_u __attribute__ ((__vector_size__ (64), __may_alias__, __aligned__ (1))); + +typedef unsigned char __mmask8; +typedef unsigned short __mmask16; + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_int2mask (int __M) +{ + return (__mmask16) __M; +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask2int (__mmask16 __M) +{ + return (int) __M; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_epi64 (long long __A, long long __B, long long __C, + long long __D, long long __E, long long __F, + long long __G, long long __H) +{ + return __extension__ (__m512i) (__v8di) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +/* Create the vector [A B C D E F G H I J K L M N O P]. 
*/ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H, + int __I, int __J, int __K, int __L, + int __M, int __N, int __O, int __P) +{ + return __extension__ (__m512i)(__v16si) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_epi16 (short __q31, short __q30, short __q29, short __q28, + short __q27, short __q26, short __q25, short __q24, + short __q23, short __q22, short __q21, short __q20, + short __q19, short __q18, short __q17, short __q16, + short __q15, short __q14, short __q13, short __q12, + short __q11, short __q10, short __q09, short __q08, + short __q07, short __q06, short __q05, short __q04, + short __q03, short __q02, short __q01, short __q00) +{ + return __extension__ (__m512i)(__v32hi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, + __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, + __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31 + }; +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_epi8 (char __q63, char __q62, char __q61, char __q60, + char __q59, char __q58, char __q57, char __q56, + char __q55, char __q54, char __q53, char __q52, + char __q51, char __q50, char __q49, char __q48, + char __q47, char __q46, char __q45, char __q44, + char __q43, char __q42, char __q41, char __q40, + char __q39, char __q38, char __q37, char __q36, + char __q35, char __q34, char __q33, char __q32, + char __q31, char __q30, char __q29, char __q28, + char __q27, char __q26, char __q25, char __q24, + char __q23, char __q22, char __q21, char __q20, + char __q19, char __q18, char __q17, char __q16, + char __q15, char __q14, char __q13, char __q12, + char 
__q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m512i)(__v64qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, + __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, + __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31, + __q32, __q33, __q34, __q35, __q36, __q37, __q38, __q39, + __q40, __q41, __q42, __q43, __q44, __q45, __q46, __q47, + __q48, __q49, __q50, __q51, __q52, __q53, __q54, __q55, + __q56, __q57, __q58, __q59, __q60, __q61, __q62, __q63 + }; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_pd (double __A, double __B, double __C, double __D, + double __E, double __F, double __G, double __H) +{ + return __extension__ (__m512d) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H, + float __I, float __J, float __K, float __L, + float __M, float __N, float __O, float __P) +{ + return __extension__ (__m512) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_epi64(e7,e6,e5,e4,e3,e2,e1,e0) + +#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7, \ + e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_epi32(e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0) + +#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_pd(e7,e6,e5,e4,e3,e2,e1,e0) + +#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_ps(e15,e14,e13,e12,e11,e10,e9,e8,e7,e6,e5,e4,e3,e2,e1,e0) + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_undefined_ps (void) +{ + __m512 __Y = __Y; + return __Y; +} + +#define _mm512_undefined _mm512_undefined_ps + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_undefined_pd (void) +{ + __m512d __Y = __Y; + return __Y; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_undefined_epi32 (void) +{ + __m512i __Y = __Y; + return __Y; +} + +#define _mm512_undefined_si512 _mm512_undefined_epi32 + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_epi8 (char __A) +{ + return __extension__ (__m512i)(__v64qi) + { __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A }; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_epi16 (short __A) +{ + return __extension__ (__m512i)(__v32hi) + { __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A }; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_pd (double __A) +{ + return __extension__ (__m512d)(__v8df) + { __A, __A, __A, __A, __A, __A, __A, __A }; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_ps (float __A) +{ + return __extension__ (__m512)(__v16sf) + { __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A }; +} + +/* Create the vector [A B C D A B C D A B C D A B C D]. 
*/ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set4_epi32 (int __A, int __B, int __C, int __D) +{ + return __extension__ (__m512i)(__v16si) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set4_epi64 (long long __A, long long __B, long long __C, + long long __D) +{ + return __extension__ (__m512i) (__v8di) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set4_pd (double __A, double __B, double __C, double __D) +{ + return __extension__ (__m512d) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set4_ps (float __A, float __B, float __C, float __D) +{ + return __extension__ (__m512) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +#define _mm512_setr4_epi64(e0,e1,e2,e3) \ + _mm512_set4_epi64(e3,e2,e1,e0) + +#define _mm512_setr4_epi32(e0,e1,e2,e3) \ + _mm512_set4_epi32(e3,e2,e1,e0) + +#define _mm512_setr4_pd(e0,e1,e2,e3) \ + _mm512_set4_pd(e3,e2,e1,e0) + +#define _mm512_setr4_ps(e0,e1,e2,e3) \ + _mm512_set4_ps(e3,e2,e1,e0) + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setzero_ps (void) +{ + return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setzero (void) +{ + return _mm512_setzero_ps (); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setzero_pd (void) +{ + return __extension__ (__m512d) { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; +} + 
+extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setzero_epi32 (void) +{ + return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 }; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setzero_si512 (void) +{ + return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 }; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_movapd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_movapd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_movaps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_movaps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_load_pd (void const *__P) +{ + return *(__m512d *) __P; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_store_pd (void *__P, __m512d __A) +{ + *(__m512d *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_store_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_storeapd512_mask ((__v8df *) __P, (__v8df) __A, + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_load_ps (void const *__P) +{ + return *(__m512 *) __P; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_load_ps (__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_store_ps (void *__P, __m512 __A) +{ + *(__m512 *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_store_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeaps512_mask ((__v16sf *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdqa64_512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); 
+} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdqa64_512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_load_epi64 (void const *__P) +{ + return *(__m512i *) __P; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_store_epi64 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdqa32_512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_movdqa32_512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); 
+} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_load_si512 (void const *__P) +{ + return *(__m512i *) __P; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_load_epi32 (void const *__P) +{ + return *(__m512i *) __P; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_store_si512 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_store_epi32 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mullo_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A * (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + 
_mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mullox_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A * (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mullox_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return _mm512_mask_mov_epi64 (__W, __M, _mm512_mullox_epi64 (__A, __B)); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sllv_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sllv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sllv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srav_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srav_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srav_epi32 (__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srlv_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srlv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srlv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X, + (__v16si) __Y, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A + (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A - (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sllv_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sllv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sllv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srav_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srav_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srav_epi64 (__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srlv_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srlv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A + (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A - (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_epu32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_slli_epi64 (__m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psllqi512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_slli_epi64 (__m512i __W, __mmask8 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i) __builtin_ia32_psllqi512_mask ((__v8di) __A, __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_slli_epi64 (__mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psllqi512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} +#else 
+#define _mm512_slli_epi64(X, C) \ + ((__m512i) __builtin_ia32_psllqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask8)-1)) + +#define _mm512_mask_slli_epi64(W, U, X, C) \ + ((__m512i) __builtin_ia32_psllqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_slli_epi64(U, X, C) \ + ((__m512i) __builtin_ia32_psllqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)_mm512_setzero_si512 (),\ + (__mmask8)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sll_epi64 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sll_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sll_epi64 (__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srli_epi64 (__m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psrlqi512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srli_epi64 (__m512i __W, __mmask8 __U, + __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psrlqi512_mask ((__v8di) __A, __B, + (__v8di) __W, + 
(__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srli_epi64 (__mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psrlqi512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} +#else +#define _mm512_srli_epi64(X, C) \ + ((__m512i) __builtin_ia32_psrlqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask8)-1)) + +#define _mm512_mask_srli_epi64(W, U, X, C) \ + ((__m512i) __builtin_ia32_psrlqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_srli_epi64(U, X, C) \ + ((__m512i) __builtin_ia32_psrlqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)_mm512_setzero_si512 (),\ + (__mmask8)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srl_epi64 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srl_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srl_epi64 (__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srai_epi64 (__m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psraqi512_mask ((__v8di) __A, __B, + (__v8di) + 
_mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srai_epi64 (__m512i __W, __mmask8 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i) __builtin_ia32_psraqi512_mask ((__v8di) __A, __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srai_epi64 (__mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psraqi512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} +#else +#define _mm512_srai_epi64(X, C) \ + ((__m512i) __builtin_ia32_psraqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask8)-1)) + +#define _mm512_mask_srai_epi64(W, U, X, C) \ + ((__m512i) __builtin_ia32_psraqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_srai_epi64(U, X, C) \ + ((__m512i) __builtin_ia32_psraqi512_mask ((__v8di)(__m512i)(X), (int)(C),\ + (__v8di)(__m512i)_mm512_setzero_si512 (),\ + (__mmask8)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sra_epi64 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sra_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A, + (__v2di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sra_epi64 (__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A, + (__v2di) 
__B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_slli_epi32 (__m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_pslldi512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_slli_epi32 (__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i) __builtin_ia32_pslldi512_mask ((__v16si) __A, __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_slli_epi32 (__mmask16 __U, __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_pslldi512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} +#else +#define _mm512_slli_epi32(X, C) \ + ((__m512i) __builtin_ia32_pslldi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask16)-1)) + +#define _mm512_mask_slli_epi32(W, U, X, C) \ + ((__m512i) __builtin_ia32_pslldi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_slli_epi32(U, X, C) \ + ((__m512i) __builtin_ia32_pslldi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sll_epi32 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sll_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return 
(__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sll_epi32 (__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srli_epi32 (__m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psrldi512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srli_epi32 (__m512i __W, __mmask16 __U, + __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psrldi512_mask ((__v16si) __A, __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srli_epi32 (__mmask16 __U, __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psrldi512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} +#else +#define _mm512_srli_epi32(X, C) \ + ((__m512i) __builtin_ia32_psrldi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask16)-1)) + +#define _mm512_mask_srli_epi32(W, U, X, C) \ + ((__m512i) __builtin_ia32_psrldi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_srli_epi32(U, X, C) \ + ((__m512i) __builtin_ia32_psrldi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srl_epi32 (__m512i 
__A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srl_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srl_epi32 (__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_srai_epi32 (__m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psradi512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_srai_epi32 (__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i) __builtin_ia32_psradi512_mask ((__v16si) __A, __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_srai_epi32 (__mmask16 __U, __m512i __A, unsigned int __B) +{ + return (__m512i) __builtin_ia32_psradi512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} +#else +#define _mm512_srai_epi32(X, C) \ + ((__m512i) __builtin_ia32_psradi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask16)-1)) + +#define _mm512_mask_srai_epi32(W, U, X, C) \ + ((__m512i) __builtin_ia32_psradi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + 
(__v16si)(__m512i)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_srai_epi32(U, X, C) \ + ((__m512i) __builtin_ia32_psradi512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sra_epi32 (__m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sra_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sra_epi32 (__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A, + (__v4si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + 
_mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_addss_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) 
__builtin_ia32_subss_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +#else +#define _mm_add_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_addsd_round(A, B, C) + +#define _mm_mask_add_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_addsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_add_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_addsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + +#define _mm_add_round_ss(A, B, C) \ + (__m128)__builtin_ia32_addss_round(A, B, C) + +#define _mm_mask_add_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_addss_mask_round(A, B, W, U, C) + +#define _mm_maskz_add_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_addss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + +#define _mm_sub_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_subsd_round(A, B, C) + +#define _mm_mask_sub_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_subsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_sub_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_subsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + +#define _mm_sub_round_ss(A, B, C) \ + (__m128)__builtin_ia32_subss_round(A, B, C) + +#define _mm_mask_sub_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_subss_mask_round(A, B, W, U, C) + +#define _mm_maskz_sub_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_subss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + 
+#endif + +/* Constant helper to represent the ternary logic operations among + vector A, B and C. */ +typedef enum +{ + _MM_TERNLOG_A = 0xF0, + _MM_TERNLOG_B = 0xCC, + _MM_TERNLOG_C = 0xAA +} _MM_TERNLOG_ENUM; + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_ternarylogic_epi64 (__m512i __A, __m512i __B, __m512i __C, + const int __imm) +{ + return (__m512i) + __builtin_ia32_pternlogq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __C, + (unsigned char) __imm, + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_ternarylogic_epi64 (__m512i __A, __mmask8 __U, __m512i __B, + __m512i __C, const int __imm) +{ + return (__m512i) + __builtin_ia32_pternlogq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_ternarylogic_epi64 (__mmask8 __U, __m512i __A, __m512i __B, + __m512i __C, const int __imm) +{ + return (__m512i) + __builtin_ia32_pternlogq512_maskz ((__v8di) __A, + (__v8di) __B, + (__v8di) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_ternarylogic_epi32 (__m512i __A, __m512i __B, __m512i __C, + const int __imm) +{ + return (__m512i) + __builtin_ia32_pternlogd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __C, + (unsigned char) __imm, + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_ternarylogic_epi32 (__m512i __A, __mmask16 __U, __m512i __B, + __m512i __C, const int __imm) +{ + return (__m512i) + __builtin_ia32_pternlogd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __C, + (unsigned char) __imm, + (__mmask16) __U); +} + +extern __inline __m512i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_ternarylogic_epi32 (__mmask16 __U, __m512i __A, __m512i __B, + __m512i __C, const int __imm) +{ + return (__m512i) + __builtin_ia32_pternlogd512_maskz ((__v16si) __A, + (__v16si) __B, + (__v16si) __C, + (unsigned char) __imm, + (__mmask16) __U); +} +#else +#define _mm512_ternarylogic_epi64(A, B, C, I) \ + ((__m512i) \ + __builtin_ia32_pternlogq512_mask ((__v8di) (__m512i) (A), \ + (__v8di) (__m512i) (B), \ + (__v8di) (__m512i) (C), \ + (unsigned char) (I), \ + (__mmask8) -1)) +#define _mm512_mask_ternarylogic_epi64(A, U, B, C, I) \ + ((__m512i) \ + __builtin_ia32_pternlogq512_mask ((__v8di) (__m512i) (A), \ + (__v8di) (__m512i) (B), \ + (__v8di) (__m512i) (C), \ + (unsigned char)(I), \ + (__mmask8) (U))) +#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, I) \ + ((__m512i) \ + __builtin_ia32_pternlogq512_maskz ((__v8di) (__m512i) (A), \ + (__v8di) (__m512i) (B), \ + (__v8di) (__m512i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) +#define _mm512_ternarylogic_epi32(A, B, C, I) \ + ((__m512i) \ + __builtin_ia32_pternlogd512_mask ((__v16si) (__m512i) (A), \ + (__v16si) (__m512i) (B), \ + (__v16si) (__m512i) (C), \ + (unsigned char) (I), \ + (__mmask16) -1)) +#define _mm512_mask_ternarylogic_epi32(A, U, B, C, I) \ + ((__m512i) \ + __builtin_ia32_pternlogd512_mask ((__v16si) (__m512i) (A), \ + (__v16si) (__m512i) (B), \ + (__v16si) (__m512i) (C), \ + (unsigned char) (I), \ + (__mmask16) (U))) +#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, I) \ + ((__m512i) \ + __builtin_ia32_pternlogd512_maskz ((__v16si) (__m512i) (A), \ + (__v16si) (__m512i) (B), \ + (__v16si) (__m512i) (C), \ + (unsigned char) (I), \ + (__mmask16) (U))) +#endif + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rcp14_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); 
+} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rcp14_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp14_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd ((__v2df) __B, + (__v2df) __A); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __B, + (__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + 
return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __B, + (__v2df) __A, + (__v2df) _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp14_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss ((__v4sf) __B, + (__v4sf) __A); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rsqrt14_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rsqrt14_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt14_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd ((__v2df) __B, + (__v2df) __A); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __B, + (__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __B, + (__v2df) __A, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt14_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss ((__v4sf) __B, + (__v4sf) __A); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return 
(__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sqrt_round_pd (__m512d __A, const int __R) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sqrt_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sqrt_round_pd (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sqrt_round_ps (__m512 __A, const int __R) +{ + return (__m512) __builtin_ia32_sqrtps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sqrt_round_ps (__m512 __W, __mmask16 __U, __m512 __A, const int __R) +{ + return (__m512) __builtin_ia32_sqrtps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sqrt_round_ps (__mmask16 __U, __m512 __A, const int __R) +{ + return (__m512) __builtin_ia32_sqrtps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_round_sd (__m128d __A, 
__m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_round_sd (__mmask8 __U, __m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_sqrtsd_mask_round ((__v2df) __B, + (__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_sqrtss_mask_round ((__v4sf) __B, + (__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} +#else +#define _mm512_sqrt_round_pd(A, C) \ + (__m512d)__builtin_ia32_sqrtpd512_mask(A, (__v8df)_mm512_undefined_pd(), -1, C) + +#define _mm512_mask_sqrt_round_pd(W, U, A, C) \ + (__m512d)__builtin_ia32_sqrtpd512_mask(A, W, U, C) + +#define 
_mm512_maskz_sqrt_round_pd(U, A, C) \ + (__m512d)__builtin_ia32_sqrtpd512_mask(A, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_sqrt_round_ps(A, C) \ + (__m512)__builtin_ia32_sqrtps512_mask(A, (__v16sf)_mm512_undefined_ps(), -1, C) + +#define _mm512_mask_sqrt_round_ps(W, U, A, C) \ + (__m512)__builtin_ia32_sqrtps512_mask(A, W, U, C) + +#define _mm512_maskz_sqrt_round_ps(U, A, C) \ + (__m512)__builtin_ia32_sqrtps512_mask(A, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm_sqrt_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, \ + (__v2df) _mm_setzero_pd (), -1, C) + +#define _mm_mask_sqrt_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, W, U, C) + +#define _mm_maskz_sqrt_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_sqrtsd_mask_round (B, A, \ + (__v2df) _mm_setzero_pd (), U, C) + +#define _mm_sqrt_round_ss(A, B, C) \ + (__m128)__builtin_ia32_sqrtss_mask_round (B, A, \ + (__v4sf) _mm_setzero_ps (), -1, C) + +#define _mm_mask_sqrt_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_sqrtss_mask_round (B, A, W, U, C) + +#define _mm_maskz_sqrt_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_sqrtss_mask_round (B, A, \ + (__v4sf) _mm_setzero_ps (), U, C) +#endif + +#define _mm_mask_sqrt_sd(W, U, A, B) \ + _mm_mask_sqrt_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_sqrt_sd(U, A, B) \ + _mm_maskz_sqrt_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_sqrt_ss(W, U, A, B) \ + _mm_mask_sqrt_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_sqrt_ss(U, A, B) \ + _mm_maskz_sqrt_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi8_epi32 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi8_epi32 (__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi8_epi32 (__mmask16 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi8_epi64 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi8_epi64 (__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi16_epi32 (__m256i __A) +{ + return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi16_epi32 (__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_cvtepi16_epi32 (__mmask16 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi16_epi64 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi16_epi64 (__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi32_epi64 (__m256i __X) +{ + return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_epi64 (__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi32_epi64 (__mmask8 __U, __m256i __X) +{ + return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu8_epi32 (__m128i __A) +{ + return (__m512i) 
__builtin_ia32_pmovzxbd512_mask ((__v16qi) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu8_epi32 (__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu8_epi32 (__mmask16 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu8_epi64 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu8_epi64 (__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu16_epi32 (__m256i __A) +{ + return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu16_epi32 (__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A, + 
(__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu16_epi32 (__mmask16 __U, __m256i __A) +{ + return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu16_epi64 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu16_epi64 (__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu32_epi64 (__m256i __X) +{ + return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu32_epi64 (__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu32_epi64 (__mmask8 __U, __m256i __X) +{ + return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline 
__m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_round_pd (__m512d __A, __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + const int __R) +{ + return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_round_pd (__m512d 
__A, __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + const int __R) +{ + return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} +#else +#define _mm512_add_round_pd(A, B, C) \ + (__m512d)__builtin_ia32_addpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) + +#define _mm512_mask_add_round_pd(W, U, A, B, C) \ + (__m512d)__builtin_ia32_addpd512_mask(A, B, W, 
U, C) + +#define _mm512_maskz_add_round_pd(U, A, B, C) \ + (__m512d)__builtin_ia32_addpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_add_round_ps(A, B, C) \ + (__m512)__builtin_ia32_addps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), -1, C) + +#define _mm512_mask_add_round_ps(W, U, A, B, C) \ + (__m512)__builtin_ia32_addps512_mask(A, B, W, U, C) + +#define _mm512_maskz_add_round_ps(U, A, B, C) \ + (__m512)__builtin_ia32_addps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm512_sub_round_pd(A, B, C) \ + (__m512d)__builtin_ia32_subpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) + +#define _mm512_mask_sub_round_pd(W, U, A, B, C) \ + (__m512d)__builtin_ia32_subpd512_mask(A, B, W, U, C) + +#define _mm512_maskz_sub_round_pd(U, A, B, C) \ + (__m512d)__builtin_ia32_subpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_sub_round_ps(A, B, C) \ + (__m512)__builtin_ia32_subps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), -1, C) + +#define _mm512_mask_sub_round_ps(W, U, A, B, C) \ + (__m512)__builtin_ia32_subps512_mask(A, B, W, U, C) + +#define _mm512_maskz_sub_round_ps(U, A, B, C) \ + (__m512)__builtin_ia32_subps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_round_pd (__m512d __A, __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_mul_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + const int __R) +{ + return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_div_round_pd (__m512d __M, __m512d __V, const int __R) +{ + return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __M, + (__v8df) __V, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_div_round_pd (__m512d __W, __mmask8 __U, __m512d __M, + __m512d __V, const int __R) +{ + return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __M, + (__v8df) __V, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_div_round_pd (__mmask8 __U, __m512d __M, __m512d __V, + const int __R) +{ + return (__m512d) 
__builtin_ia32_divpd512_mask ((__v8df) __M, + (__v8df) __V, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_div_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_div_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_div_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_divss_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +#else +#define _mm512_mul_round_pd(A, B, C) \ + (__m512d)__builtin_ia32_mulpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) + +#define _mm512_mask_mul_round_pd(W, U, A, B, C) \ + (__m512d)__builtin_ia32_mulpd512_mask(A, B, W, U, C) + +#define _mm512_maskz_mul_round_pd(U, A, B, C) \ + (__m512d)__builtin_ia32_mulpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_mul_round_ps(A, B, C) \ + (__m512)__builtin_ia32_mulps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), -1, C) + +#define _mm512_mask_mul_round_ps(W, U, A, B, C) \ + (__m512)__builtin_ia32_mulps512_mask(A, B, W, U, C) + +#define _mm512_maskz_mul_round_ps(U, A, B, C) \ + (__m512)__builtin_ia32_mulps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm512_div_round_pd(A, B, C) \ + (__m512d)__builtin_ia32_divpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, C) + +#define _mm512_mask_div_round_pd(W, U, A, B, C) \ + (__m512d)__builtin_ia32_divpd512_mask(A, B, W, U, C) + +#define _mm512_maskz_div_round_pd(U, A, B, C) \ + (__m512d)__builtin_ia32_divpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, C) + +#define _mm512_div_round_ps(A, B, C) \ + (__m512)__builtin_ia32_divps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), -1, C) + +#define _mm512_mask_div_round_ps(W, U, A, B, C) \ + (__m512)__builtin_ia32_divps512_mask(A, B, W, U, C) + +#define _mm512_maskz_div_round_ps(U, A, B, C) \ + 
(__m512)__builtin_ia32_divps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, C) + +#define _mm_mul_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_mulsd_round(A, B, C) + +#define _mm_mask_mul_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_mulsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_mul_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_mulsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + +#define _mm_mul_round_ss(A, B, C) \ + (__m128)__builtin_ia32_mulss_round(A, B, C) + +#define _mm_mask_mul_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_mulss_mask_round(A, B, W, U, C) + +#define _mm_maskz_mul_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_mulss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + +#define _mm_div_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_divsd_round(A, B, C) + +#define _mm_mask_div_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_divsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_div_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_divsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + +#define _mm_div_round_ss(A, B, C) \ + (__m128)__builtin_ia32_divss_round(A, B, C) + +#define _mm_mask_div_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_divss_mask_round(A, B, W, U, C) + +#define _mm_maskz_div_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_divss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_round_pd (__m512d __A, __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + 
(__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + const int __R) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_round_pd (__m512d __A, __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_maskz_min_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + const int __R) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_round_ps (__mmask16 __U, __m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} +#else +#define _mm512_max_round_pd(A, B, R) \ + (__m512d)__builtin_ia32_maxpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, R) + +#define _mm512_mask_max_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_maxpd512_mask(A, B, W, U, R) + +#define _mm512_maskz_max_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_maxpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, R) + +#define _mm512_max_round_ps(A, B, R) \ + (__m512)__builtin_ia32_maxps512_mask(A, B, (__v16sf)_mm512_undefined_pd(), -1, R) + +#define _mm512_mask_max_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_maxps512_mask(A, B, W, U, R) + +#define _mm512_maskz_max_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_maxps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, R) + +#define _mm512_min_round_pd(A, B, R) \ + 
(__m512d)__builtin_ia32_minpd512_mask(A, B, (__v8df)_mm512_undefined_pd(), -1, R) + +#define _mm512_mask_min_round_pd(W, U, A, B, R) \ + (__m512d)__builtin_ia32_minpd512_mask(A, B, W, U, R) + +#define _mm512_maskz_min_round_pd(U, A, B, R) \ + (__m512d)__builtin_ia32_minpd512_mask(A, B, (__v8df)_mm512_setzero_pd(), U, R) + +#define _mm512_min_round_ps(A, B, R) \ + (__m512)__builtin_ia32_minps512_mask(A, B, (__v16sf)_mm512_undefined_ps(), -1, R) + +#define _mm512_mask_min_round_ps(W, U, A, B, R) \ + (__m512)__builtin_ia32_minps512_mask(A, B, W, U, R) + +#define _mm512_maskz_min_round_ps(U, A, B, R) \ + (__m512)__builtin_ia32_minps512_mask(A, B, (__v16sf)_mm512_setzero_ps(), U, R) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_scalef_round_pd (__m512d __A, __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_scalef_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __R) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_scalef_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + const int __R) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_scalef_round_ps (__m512 __A, __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + 
+extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_scalef_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __R) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_scalef_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + const int __R) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_scalef_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask_scalef_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_round_ss (__mmask8 __U, __m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} +#else +#define _mm512_scalef_round_pd(A, B, C) \ + ((__m512d) \ + __builtin_ia32_scalefpd512_mask((A), (B), \ + (__v8df) _mm512_undefined_pd(), \ + -1, (C))) + +#define _mm512_mask_scalef_round_pd(W, U, A, B, C) \ + ((__m512d) __builtin_ia32_scalefpd512_mask((A), (B), (W), (U), (C))) + +#define _mm512_maskz_scalef_round_pd(U, A, B, C) \ + ((__m512d) \ + __builtin_ia32_scalefpd512_mask((A), (B), \ + (__v8df) _mm512_setzero_pd(), \ + (U), (C))) + +#define _mm512_scalef_round_ps(A, B, C) \ + ((__m512) \ + __builtin_ia32_scalefps512_mask((A), (B), \ + (__v16sf) _mm512_undefined_ps(), \ + -1, (C))) + +#define _mm512_mask_scalef_round_ps(W, U, A, B, C) \ + ((__m512) __builtin_ia32_scalefps512_mask((A), (B), (W), (U), (C))) + +#define _mm512_maskz_scalef_round_ps(U, A, B, C) \ + ((__m512) \ + __builtin_ia32_scalefps512_mask((A), (B), \ + (__v16sf) _mm512_setzero_ps(), \ + (U), (C))) + +#define _mm_scalef_round_sd(A, B, C) \ + ((__m128d) \ + __builtin_ia32_scalefsd_mask_round ((A), (B), \ + (__v2df) _mm_undefined_pd (), \ + -1, (C))) + +#define _mm_scalef_round_ss(A, B, C) \ + ((__m128) \ + __builtin_ia32_scalefss_mask_round ((A), (B), \ + (__v4sf) _mm_undefined_ps (), \ + -1, (C))) + +#define _mm_mask_scalef_round_sd(W, U, A, B, C) \ + ((__m128d) \ + __builtin_ia32_scalefsd_mask_round ((A), (B), (W), (U), (C))) + +#define _mm_mask_scalef_round_ss(W, U, A, B, C) \ + ((__m128) \ + __builtin_ia32_scalefss_mask_round ((A), 
(B), (W), (U), (C))) + +#define _mm_maskz_scalef_round_sd(U, A, B, C) \ + ((__m128d) \ + __builtin_ia32_scalefsd_mask_round ((A), (B), \ + (__v2df) _mm_setzero_pd (), \ + (U), (C))) + +#define _mm_maskz_scalef_round_ss(U, A, B, C) \ + ((__m128) \ + __builtin_ia32_scalefss_mask_round ((A), (B), \ + (__v4sf) _mm_setzero_ps (), \ + (U), (C))) +#endif + +#define _mm_mask_scalef_sd(W, U, A, B) \ + _mm_mask_scalef_round_sd ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_scalef_sd(U, A, B) \ + _mm_maskz_scalef_round_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_scalef_ss(W, U, A, B) \ + _mm_mask_scalef_round_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_scalef_ss(U, A, B) \ + _mm_maskz_scalef_round_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_round_pd (__m512d __A, __m512d __B, __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_round_pd (__m512d __A, __m512d __B, __m512d __C, + __mmask8 __U, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) 
__builtin_ia32_vfmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_round_ps (__m512 __A, __m512 __B, __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_round_ps (__m512 __A, __m512 __B, __m512 __C, + __mmask16 __U, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsub_round_pd (__m512d __A, __m512d __B, __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsub_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, 
__R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsub_round_pd (__m512d __A, __m512d __B, __m512d __C, + __mmask8 __U, const int __R) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsub_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsub_round_ps (__m512 __A, __m512 __B, __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsub_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsub_round_ps (__m512 __A, __m512 __B, __m512 __C, + __mmask16 __U, const int __R) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsub_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_fmaddsub_round_pd (__m512d __A, __m512d __B, __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmaddsub_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmaddsub_round_pd (__m512d __A, __m512d __B, __m512d __C, + __mmask8 __U, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmaddsub_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmaddsub_round_ps (__m512 __A, __m512 __B, __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmaddsub_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmaddsub_round_ps 
(__m512 __A, __m512 __B, __m512 __C, + __mmask16 __U, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmaddsub_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsubadd_round_pd (__m512d __A, __m512d __B, __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsubadd_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsubadd_round_pd (__m512d __A, __m512d __B, __m512d __C, + __mmask8 __U, const int __R) +{ + return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsubadd_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsubadd_round_ps (__m512 __A, __m512 __B, __m512 __C, const 
int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsubadd_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsubadd_round_ps (__m512 __A, __m512 __B, __m512 __C, + __mmask16 __U, const int __R) +{ + return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsubadd_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmadd_round_pd (__m512d __A, __m512d __B, __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmadd_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmadd_round_pd (__m512d __A, __m512d __B, __m512d __C, + __mmask8 __U, const int __R) +{ + return (__m512d) 
__builtin_ia32_vfnmaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmadd_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmadd_round_ps (__m512 __A, __m512 __B, __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmadd_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmadd_round_ps (__m512 __A, __m512 __B, __m512 __C, + __mmask16 __U, const int __R) +{ + return (__m512) __builtin_ia32_vfnmaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmadd_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfnmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmsub_round_pd (__m512d __A, __m512d __B, __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + 
(__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmsub_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmsub_round_pd (__m512d __A, __m512d __B, __m512d __C, + __mmask8 __U, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmsub_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512d __C, const int __R) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmsub_round_ps (__m512 __A, __m512 __B, __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmsub_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmsub_round_ps (__m512 __A, __m512 __B, __m512 __C, + __mmask16 __U, const int __R) +{ + return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmsub_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512 __C, const int __R) +{ + return (__m512) __builtin_ia32_vfnmsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, __R); +} +#else +#define _mm512_fmadd_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask(A, B, C, -1, R) + +#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask(A, B, C, U, R) + +#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddpd512_maskz(A, B, C, U, R) + +#define _mm512_fmadd_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask(A, B, C, -1, R) + +#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask(A, B, C, U, R) + +#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmaddps512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddps512_maskz(A, B, C, U, R) + +#define _mm512_fmsub_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmsubpd512_mask(A, B, C, -1, R) + +#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmsubpd512_mask(A, B, C, U, R) + +#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmsubpd512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmsubpd512_maskz(A, B, C, U, R) + +#define _mm512_fmsub_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmsubps512_mask(A, B, C, -1, R) + +#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmsubps512_mask(A, B, C, U, R) + +#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmsubps512_mask3(A, B, C, U, R) + +#define 
_mm512_maskz_fmsub_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmsubps512_maskz(A, B, C, U, R) + +#define _mm512_fmaddsub_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask(A, B, C, -1, R) + +#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask(A, B, C, U, R) + +#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_maskz(A, B, C, U, R) + +#define _mm512_fmaddsub_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask(A, B, C, -1, R) + +#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask(A, B, C, U, R) + +#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_maskz(A, B, C, U, R) + +#define _mm512_fmsubadd_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask(A, B, -(C), -1, R) + +#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_mask(A, B, -(C), U, R) + +#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfmsubaddpd512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfmaddsubpd512_maskz(A, B, -(C), U, R) + +#define _mm512_fmsubadd_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask(A, B, -(C), -1, R) + +#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_mask(A, B, -(C), U, R) + +#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfmsubaddps512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfmaddsubps512_maskz(A, B, -(C), 
U, R) + +#define _mm512_fnmadd_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfnmaddpd512_mask(A, B, C, -1, R) + +#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfnmaddpd512_mask(A, B, C, U, R) + +#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfnmaddpd512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfnmaddpd512_maskz(A, B, C, U, R) + +#define _mm512_fnmadd_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfnmaddps512_mask(A, B, C, -1, R) + +#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfnmaddps512_mask(A, B, C, U, R) + +#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfnmaddps512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfnmaddps512_maskz(A, B, C, U, R) + +#define _mm512_fnmsub_round_pd(A, B, C, R) \ + (__m512d)__builtin_ia32_vfnmsubpd512_mask(A, B, C, -1, R) + +#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \ + (__m512d)__builtin_ia32_vfnmsubpd512_mask(A, B, C, U, R) + +#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \ + (__m512d)__builtin_ia32_vfnmsubpd512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \ + (__m512d)__builtin_ia32_vfnmsubpd512_maskz(A, B, C, U, R) + +#define _mm512_fnmsub_round_ps(A, B, C, R) \ + (__m512)__builtin_ia32_vfnmsubps512_mask(A, B, C, -1, R) + +#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \ + (__m512)__builtin_ia32_vfnmsubps512_mask(A, B, C, U, R) + +#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \ + (__m512)__builtin_ia32_vfnmsubps512_mask3(A, B, C, U, R) + +#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \ + (__m512)__builtin_ia32_vfnmsubps512_maskz(A, B, C, U, R) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_epi64 (__m512i __A) +{ + return (__m512i) 
__builtin_ia32_pabsq512_mask ((__v8di) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastss_ps (__m128 __A) +{ + return (__m512) __builtin_ia32_broadcastss512 ((__v4sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastss512 ((__v4sf) __A, + (__v16sf) __O, __M); +} + +extern __inline __m512 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastss512 ((__v4sf) __A, + (__v16sf) + _mm512_setzero_ps (), + __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastsd_pd (__m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastsd512 ((__v2df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastsd512 ((__v2df) __A, + (__v8df) __O, __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m512d) __builtin_ia32_broadcastsd512 ((__v2df) __A, + (__v8df) + _mm512_setzero_pd (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastd_epi32 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastd512 ((__v4si) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastd512 ((__v4si) __A, + (__v16si) __O, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastd512 ((__v4si) __A, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_epi32 (int __A) +{ + return (__m512i)(__v16si) + { __A, __A, __A, __A, __A, __A, __A, 
__A, + __A, __A, __A, __A, __A, __A, __A, __A }; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A) +{ + return (__m512i) __builtin_ia32_pbroadcastd512_gpr_mask (__A, (__v16si) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_set1_epi32 (__mmask16 __M, int __A) +{ + return (__m512i) + __builtin_ia32_pbroadcastd512_gpr_mask (__A, + (__v16si) _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcastq_epi64 (__m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastq512 ((__v2di) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastq512 ((__v2di) __A, + (__v8di) __O, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_pbroadcastq512 ((__v2di) __A, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_epi64 (long long __A) +{ + return (__m512i)(__v8di) { __A, __A, __A, __A, __A, __A, __A, __A }; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A) +{ + return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m512i) + 
__builtin_ia32_pbroadcastq512_gpr_mask (__A, + (__v8di) _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_f32x4 (__m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_f32x4 (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A, + (__v16sf) __O, + __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_f32x4 (__mmask16 __M, __m128 __A) +{ + return (__m512) __builtin_ia32_broadcastf32x4_512 ((__v4sf) __A, + (__v16sf) + _mm512_setzero_ps (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_i32x4 (__m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_i32x4 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A, + (__v16si) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_i32x4 (__mmask16 __M, __m128i __A) +{ + return (__m512i) __builtin_ia32_broadcasti32x4_512 ((__v4si) __A, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_f64x4 (__m256d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_f64x4 (__m512d __O, __mmask8 __M, __m256d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A, + (__v8df) __O, + __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_f64x4 (__mmask8 __M, __m256d __A) +{ + return (__m512d) __builtin_ia32_broadcastf64x4_512 ((__v4df) __A, + (__v8df) + _mm512_setzero_pd (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_broadcast_i64x4 (__m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_broadcast_i64x4 (__m512i __O, __mmask8 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A, + (__v8di) __O, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_broadcast_i64x4 (__mmask8 __M, __m256i __A) +{ + return (__m512i) __builtin_ia32_broadcasti64x4_512 ((__v4di) __A, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +typedef enum +{ + _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02, + _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05, + _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08, + _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B, + _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E, + _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11, + _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14, + _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17, + _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A, + _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, 
_MM_PERM_ABDB = 0x1D, + _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20, + _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23, + _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26, + _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29, + _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C, + _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F, + _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32, + _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35, + _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38, + _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B, + _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E, + _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41, + _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44, + _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, + _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, + _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, + _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, + _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, + _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, + _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, + _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, + _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, + _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, + _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, + _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, + _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, + _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, + _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, + _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, + 
_MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, + _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, + _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, + _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, + _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, + _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, + _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, + _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, + _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, + _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, + _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, + _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, + _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, + _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, + _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, + _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, + _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, + _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, + _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, + _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, + _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, + _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, + _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, + _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, + _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, + _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, + _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, + _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, + _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, + _MM_PERM_DADA = 0xCC, 
_MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, + _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, + _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, + _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, + _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, + _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, + _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, + _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, + _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, + _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, + _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, + _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, + _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, + _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, + _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, + _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, + _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, + _MM_PERM_DDDD = 0xFF +} _MM_PERM_ENUM; + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_epi32 (__m512i __A, _MM_PERM_ENUM __mask) +{ + return (__m512i) __builtin_ia32_pshufd512_mask ((__v16si) __A, + __mask, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_epi32 (__m512i __W, __mmask16 __U, __m512i __A, + _MM_PERM_ENUM __mask) +{ + return (__m512i) __builtin_ia32_pshufd512_mask ((__v16si) __A, + __mask, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_epi32 (__mmask16 __U, __m512i __A, _MM_PERM_ENUM __mask) +{ + return (__m512i) 
__builtin_ia32_pshufd512_mask ((__v16si) __A, + __mask, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_i64x2 (__m512i __A, __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_shuf_i64x2_mask ((__v8di) __A, + (__v8di) __B, __imm, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_i64x2 (__m512i __W, __mmask8 __U, __m512i __A, + __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_shuf_i64x2_mask ((__v8di) __A, + (__v8di) __B, __imm, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_i64x2 (__mmask8 __U, __m512i __A, __m512i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_shuf_i64x2_mask ((__v8di) __A, + (__v8di) __B, __imm, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_i32x4 (__m512i __A, __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_shuf_i32x4_mask ((__v16si) __A, + (__v16si) __B, + __imm, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_i32x4 (__m512i __W, __mmask16 __U, __m512i __A, + __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_shuf_i32x4_mask ((__v16si) __A, + (__v16si) __B, + __imm, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_i32x4 (__mmask16 __U, __m512i __A, __m512i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_shuf_i32x4_mask ((__v16si) __A, + 
(__v16si) __B, + __imm, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_f64x2 (__m512d __A, __m512d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_shuf_f64x2_mask ((__v8df) __A, + (__v8df) __B, __imm, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_f64x2 (__m512d __W, __mmask8 __U, __m512d __A, + __m512d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_shuf_f64x2_mask ((__v8df) __A, + (__v8df) __B, __imm, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_f64x2 (__mmask8 __U, __m512d __A, __m512d __B, + const int __imm) +{ + return (__m512d) __builtin_ia32_shuf_f64x2_mask ((__v8df) __A, + (__v8df) __B, __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_f32x4 (__m512 __A, __m512 __B, const int __imm) +{ + return (__m512) __builtin_ia32_shuf_f32x4_mask ((__v16sf) __A, + (__v16sf) __B, __imm, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_f32x4 (__m512 __W, __mmask16 __U, __m512 __A, + __m512 __B, const int __imm) +{ + return (__m512) __builtin_ia32_shuf_f32x4_mask ((__v16sf) __A, + (__v16sf) __B, __imm, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_f32x4 (__mmask16 __U, __m512 __A, __m512 __B, + const int __imm) +{ + return (__m512) __builtin_ia32_shuf_f32x4_mask ((__v16sf) __A, + (__v16sf) __B, __imm, + (__v16sf) + _mm512_setzero_ps (), 
+ (__mmask16) __U); +} + +#else +#define _mm512_shuffle_epi32(X, C) \ + ((__m512i) __builtin_ia32_pshufd512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask16)-1)) + +#define _mm512_mask_shuffle_epi32(W, U, X, C) \ + ((__m512i) __builtin_ia32_pshufd512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_shuffle_epi32(U, X, C) \ + ((__m512i) __builtin_ia32_pshufd512_mask ((__v16si)(__m512i)(X), (int)(C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)(U))) + +#define _mm512_shuffle_i64x2(X, Y, C) \ + ((__m512i) __builtin_ia32_shuf_i64x2_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(C),\ + (__v8di)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask8)-1)) + +#define _mm512_mask_shuffle_i64x2(W, U, X, Y, C) \ + ((__m512i) __builtin_ia32_shuf_i64x2_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(C),\ + (__v8di)(__m512i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_shuffle_i64x2(U, X, Y, C) \ + ((__m512i) __builtin_ia32_shuf_i64x2_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(C),\ + (__v8di)(__m512i)_mm512_setzero_si512 (),\ + (__mmask8)(U))) + +#define _mm512_shuffle_i32x4(X, Y, C) \ + ((__m512i) __builtin_ia32_shuf_i32x4_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(C),\ + (__v16si)(__m512i)_mm512_undefined_epi32 (),\ + (__mmask16)-1)) + +#define _mm512_mask_shuffle_i32x4(W, U, X, Y, C) \ + ((__m512i) __builtin_ia32_shuf_i32x4_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(C),\ + (__v16si)(__m512i)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_shuffle_i32x4(U, X, Y, C) \ + ((__m512i) __builtin_ia32_shuf_i32x4_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(C),\ + (__v16si)(__m512i)_mm512_setzero_si512 (),\ + (__mmask16)(U))) + +#define _mm512_shuffle_f64x2(X, Y, C) \ + ((__m512d) __builtin_ia32_shuf_f64x2_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), 
(int)(C),\ + (__v8df)(__m512d)_mm512_undefined_pd(),\ + (__mmask8)-1)) + +#define _mm512_mask_shuffle_f64x2(W, U, X, Y, C) \ + ((__m512d) __builtin_ia32_shuf_f64x2_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(C),\ + (__v8df)(__m512d)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_shuffle_f64x2(U, X, Y, C) \ + ((__m512d) __builtin_ia32_shuf_f64x2_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(C),\ + (__v8df)(__m512d)_mm512_setzero_pd(),\ + (__mmask8)(U))) + +#define _mm512_shuffle_f32x4(X, Y, C) \ + ((__m512) __builtin_ia32_shuf_f32x4_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(C),\ + (__v16sf)(__m512)_mm512_undefined_ps(),\ + (__mmask16)-1)) + +#define _mm512_mask_shuffle_f32x4(W, U, X, Y, C) \ + ((__m512) __builtin_ia32_shuf_f32x4_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(C),\ + (__v16sf)(__m512)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_shuffle_f32x4(U, X, Y, C) \ + ((__m512) __builtin_ia32_shuf_f32x4_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(C),\ + (__v16sf)(__m512)_mm512_setzero_ps(),\ + (__mmask16)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rolv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + 
(__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rorv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rolv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rorv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) 
__builtin_ia32_prorvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundpd_epi32 (__m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundpd_epi32 (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundpd_epu32 (__m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_cvtt_roundpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundpd_epu32 (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, __R); +} +#else +#define _mm512_cvtt_roundpd_epi32(A, B) \ + ((__m256i)__builtin_ia32_cvttpd2dq512_mask(A, (__v8si)_mm256_undefined_si256(), -1, B)) + +#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, B) \ + ((__m256i)__builtin_ia32_cvttpd2dq512_mask(A, (__v8si)(W), U, B)) + +#define _mm512_maskz_cvtt_roundpd_epi32(U, A, B) \ + ((__m256i)__builtin_ia32_cvttpd2dq512_mask(A, (__v8si)_mm256_setzero_si256(), U, B)) + +#define _mm512_cvtt_roundpd_epu32(A, B) \ + ((__m256i)__builtin_ia32_cvttpd2udq512_mask(A, (__v8si)_mm256_undefined_si256(), -1, B)) + +#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, B) \ + ((__m256i)__builtin_ia32_cvttpd2udq512_mask(A, (__v8si)(W), U, B)) + +#define _mm512_maskz_cvtt_roundpd_epu32(U, A, B) \ + ((__m256i)__builtin_ia32_cvttpd2udq512_mask(A, (__v8si)_mm256_setzero_si256(), U, B)) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundpd_epi32 (__m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, __R); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundpd_epi32 (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundpd_epu32 (__m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundpd_epu32 (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, __R); +} +#else +#define _mm512_cvt_roundpd_epi32(A, B) \ + ((__m256i)__builtin_ia32_cvtpd2dq512_mask(A, (__v8si)_mm256_undefined_si256(), -1, B)) + +#define _mm512_mask_cvt_roundpd_epi32(W, U, A, B) \ + ((__m256i)__builtin_ia32_cvtpd2dq512_mask(A, (__v8si)(W), U, B)) + +#define _mm512_maskz_cvt_roundpd_epi32(U, A, B) \ + ((__m256i)__builtin_ia32_cvtpd2dq512_mask(A, (__v8si)_mm256_setzero_si256(), U, B)) + +#define _mm512_cvt_roundpd_epu32(A, B) \ + ((__m256i)__builtin_ia32_cvtpd2udq512_mask(A, (__v8si)_mm256_undefined_si256(), -1, B)) + +#define _mm512_mask_cvt_roundpd_epu32(W, U, A, B) \ + ((__m256i)__builtin_ia32_cvtpd2udq512_mask(A, (__v8si)(W), U, B)) + +#define _mm512_maskz_cvt_roundpd_epu32(U, A, B) \ + ((__m256i)__builtin_ia32_cvtpd2udq512_mask(A, (__v8si)_mm256_setzero_si256(), U, B)) +#endif + +#ifdef __OPTIMIZE__ 
+extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundps_epi32 (__m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundps_epi32 (__mmask16 __U, __m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundps_epu32 (__m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundps_epu32 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundps_epu32 (__mmask16 __U, __m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, __R); +} +#else +#define _mm512_cvtt_roundps_epi32(A, B) \ + ((__m512i)__builtin_ia32_cvttps2dq512_mask(A, (__v16si)_mm512_undefined_epi32 (), -1, B)) + +#define _mm512_mask_cvtt_roundps_epi32(W, U, A, 
B) \ + ((__m512i)__builtin_ia32_cvttps2dq512_mask(A, (__v16si)(W), U, B)) + +#define _mm512_maskz_cvtt_roundps_epi32(U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2dq512_mask(A, (__v16si)_mm512_setzero_si512 (), U, B)) + +#define _mm512_cvtt_roundps_epu32(A, B) \ + ((__m512i)__builtin_ia32_cvttps2udq512_mask(A, (__v16si)_mm512_undefined_epi32 (), -1, B)) + +#define _mm512_mask_cvtt_roundps_epu32(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2udq512_mask(A, (__v16si)(W), U, B)) + +#define _mm512_maskz_cvtt_roundps_epu32(U, A, B) \ + ((__m512i)__builtin_ia32_cvttps2udq512_mask(A, (__v16si)_mm512_setzero_si512 (), U, B)) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundps_epi32 (__m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundps_epi32 (__m512i __W, __mmask16 __U, __m512 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundps_epi32 (__mmask16 __U, __m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundps_epu32 (__m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundps_epu32 (__m512i __W, __mmask16 __U, 
__m512 __A, + const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundps_epu32 (__mmask16 __U, __m512 __A, const int __R) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, __R); +} +#else +#define _mm512_cvt_roundps_epi32(A, B) \ + ((__m512i)__builtin_ia32_cvtps2dq512_mask(A, (__v16si)_mm512_undefined_epi32 (), -1, B)) + +#define _mm512_mask_cvt_roundps_epi32(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2dq512_mask(A, (__v16si)(W), U, B)) + +#define _mm512_maskz_cvt_roundps_epi32(U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2dq512_mask(A, (__v16si)_mm512_setzero_si512 (), U, B)) + +#define _mm512_cvt_roundps_epu32(A, B) \ + ((__m512i)__builtin_ia32_cvtps2udq512_mask(A, (__v16si)_mm512_undefined_epi32 (), -1, B)) + +#define _mm512_mask_cvt_roundps_epu32(W, U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2udq512_mask(A, (__v16si)(W), U, B)) + +#define _mm512_maskz_cvt_roundps_epu32(U, A, B) \ + ((__m512i)__builtin_ia32_cvtps2udq512_mask(A, (__v16si)_mm512_setzero_si512 (), U, B)) +#endif + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtu32_sd (__m128d __A, unsigned __B) +{ + return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundu64_sd (__m128d __A, unsigned long long __B, const int __R) +{ + return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundi64_sd (__m128d __A, long long __B, const int __R) +{ + return (__m128d) __builtin_ia32_cvtsi2sd64 ((__v2df) __A, __B, 
__R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsi64_sd (__m128d __A, long long __B, const int __R) +{ + return (__m128d) __builtin_ia32_cvtsi2sd64 ((__v2df) __A, __B, __R); +} +#else +#define _mm_cvt_roundu64_sd(A, B, C) \ + (__m128d)__builtin_ia32_cvtusi2sd64(A, B, C) + +#define _mm_cvt_roundi64_sd(A, B, C) \ + (__m128d)__builtin_ia32_cvtsi2sd64(A, B, C) + +#define _mm_cvt_roundsi64_sd(A, B, C) \ + (__m128d)__builtin_ia32_cvtsi2sd64(A, B, C) +#endif + +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundu32_ss (__m128 __A, unsigned __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsi32_ss (__m128 __A, int __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtsi2ss32 ((__v4sf) __A, __B, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundi32_ss (__m128 __A, int __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtsi2ss32 ((__v4sf) __A, __B, __R); +} +#else +#define _mm_cvt_roundu32_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtusi2ss32(A, B, C) + +#define _mm_cvt_roundi32_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtsi2ss32(A, B, C) + +#define _mm_cvt_roundsi32_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtsi2ss32(A, B, C) +#endif + +#ifdef __x86_64__ +#ifdef __OPTIMIZE__ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundu64_ss (__m128 __A, unsigned long long __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsi64_ss (__m128 __A, long long __B, const int __R) +{ + return (__m128) 
__builtin_ia32_cvtsi2ss64 ((__v4sf) __A, __B, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundi64_ss (__m128 __A, long long __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtsi2ss64 ((__v4sf) __A, __B, __R); +} +#else +#define _mm_cvt_roundu64_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtusi2ss64(A, B, C) + +#define _mm_cvt_roundi64_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtsi2ss64(A, B, C) + +#define _mm_cvt_roundsi64_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtsi2ss64(A, B, C) +#endif + +#endif + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtusepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) + _mm256_undefined_si256 (), + (__mmask16) 
-1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) + _mm256_undefined_si256 (), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtusepi32_epi16 (__m512i __A) +{ + return (__m256i) 
__builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) + _mm256_undefined_si256 (), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_cvtsepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtusepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) + 
_mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return 
(__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtusepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqb512mem_mask ((unsigned long long *) __P, + (__v8di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqb512mem_mask ((unsigned long long *) __P, (__v8di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtusepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqb512mem_mask ((unsigned long long *) __P, (__v8di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + 
(__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi32_pd (__m256i __A) +{ + return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu32_pd (__m256i __A) +{ + return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_cvt_roundepi32_ps (__m512i __A, const int __R) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepi32_ps (__m512 __W, __mmask16 __U, __m512i __A, + const int __R) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepi32_ps (__mmask16 __U, __m512i __A, const int __R) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepu32_ps (__m512i __A, const int __R) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepu32_ps (__m512 __W, __mmask16 __U, __m512i __A, + const int __R) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepu32_ps (__mmask16 __U, __m512i __A, const int __R) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +#else +#define _mm512_cvt_roundepi32_ps(A, B) \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), (__v16sf)_mm512_undefined_ps(), -1, B) + +#define _mm512_mask_cvt_roundepi32_ps(W, U, A, B) \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), W, U, B) + +#define 
_mm512_maskz_cvt_roundepi32_ps(U, A, B) \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), (__v16sf)_mm512_setzero_ps(), U, B) + +#define _mm512_cvt_roundepu32_ps(A, B) \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), (__v16sf)_mm512_undefined_ps(), -1, B) + +#define _mm512_mask_cvt_roundepu32_ps(W, U, A, B) \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), W, U, B) + +#define _mm512_maskz_cvt_roundepu32_ps(U, A, B) \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), (__v16sf)_mm512_setzero_ps(), U, B) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extractf64x4_pd (__m512d __A, const int __imm) +{ + return (__m256d) __builtin_ia32_extractf64x4_mask ((__v8df) __A, + __imm, + (__v4df) + _mm256_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extractf64x4_pd (__m256d __W, __mmask8 __U, __m512d __A, + const int __imm) +{ + return (__m256d) __builtin_ia32_extractf64x4_mask ((__v8df) __A, + __imm, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extractf64x4_pd (__mmask8 __U, __m512d __A, const int __imm) +{ + return (__m256d) __builtin_ia32_extractf64x4_mask ((__v8df) __A, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extractf32x4_ps (__m512 __A, const int __imm) +{ + return (__m128) __builtin_ia32_extractf32x4_mask ((__v16sf) __A, + __imm, + (__v4sf) + _mm_undefined_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extractf32x4_ps (__m128 __W, __mmask8 __U, __m512 __A, + const int __imm) +{ + return (__m128) __builtin_ia32_extractf32x4_mask ((__v16sf) 
__A, + __imm, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extractf32x4_ps (__mmask8 __U, __m512 __A, const int __imm) +{ + return (__m128) __builtin_ia32_extractf32x4_mask ((__v16sf) __A, + __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extracti64x4_epi64 (__m512i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_extracti64x4_mask ((__v8di) __A, + __imm, + (__v4di) + _mm256_undefined_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extracti64x4_epi64 (__m256i __W, __mmask8 __U, __m512i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_extracti64x4_mask ((__v8di) __A, + __imm, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extracti64x4_epi64 (__mmask8 __U, __m512i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_extracti64x4_mask ((__v8di) __A, + __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_extracti32x4_epi32 (__m512i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_extracti32x4_mask ((__v16si) __A, + __imm, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_extracti32x4_epi32 (__m128i __W, __mmask8 __U, __m512i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti32x4_mask ((__v16si) __A, + __imm, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_extracti32x4_epi32 
(__mmask8 __U, __m512i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_extracti32x4_mask ((__v16si) __A, + __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} +#else + +#define _mm512_extractf64x4_pd(X, C) \ + ((__m256d) __builtin_ia32_extractf64x4_mask ((__v8df)(__m512d) (X), \ + (int) (C),\ + (__v4df)(__m256d)_mm256_undefined_pd(),\ + (__mmask8)-1)) + +#define _mm512_mask_extractf64x4_pd(W, U, X, C) \ + ((__m256d) __builtin_ia32_extractf64x4_mask ((__v8df)(__m512d) (X), \ + (int) (C),\ + (__v4df)(__m256d)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_extractf64x4_pd(U, X, C) \ + ((__m256d) __builtin_ia32_extractf64x4_mask ((__v8df)(__m512d) (X), \ + (int) (C),\ + (__v4df)(__m256d)_mm256_setzero_pd(),\ + (__mmask8)(U))) + +#define _mm512_extractf32x4_ps(X, C) \ + ((__m128) __builtin_ia32_extractf32x4_mask ((__v16sf)(__m512) (X), \ + (int) (C),\ + (__v4sf)(__m128)_mm_undefined_ps(),\ + (__mmask8)-1)) + +#define _mm512_mask_extractf32x4_ps(W, U, X, C) \ + ((__m128) __builtin_ia32_extractf32x4_mask ((__v16sf)(__m512) (X), \ + (int) (C),\ + (__v4sf)(__m128)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_extractf32x4_ps(U, X, C) \ + ((__m128) __builtin_ia32_extractf32x4_mask ((__v16sf)(__m512) (X), \ + (int) (C),\ + (__v4sf)(__m128)_mm_setzero_ps(),\ + (__mmask8)(U))) + +#define _mm512_extracti64x4_epi64(X, C) \ + ((__m256i) __builtin_ia32_extracti64x4_mask ((__v8di)(__m512i) (X), \ + (int) (C),\ + (__v4di)(__m256i)_mm256_undefined_si256 (),\ + (__mmask8)-1)) + +#define _mm512_mask_extracti64x4_epi64(W, U, X, C) \ + ((__m256i) __builtin_ia32_extracti64x4_mask ((__v8di)(__m512i) (X), \ + (int) (C),\ + (__v4di)(__m256i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_extracti64x4_epi64(U, X, C) \ + ((__m256i) __builtin_ia32_extracti64x4_mask ((__v8di)(__m512i) (X), \ + (int) (C),\ + (__v4di)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)(U))) + +#define _mm512_extracti32x4_epi32(X, C) \ + ((__m128i) __builtin_ia32_extracti32x4_mask 
((__v16si)(__m512i) (X), \ + (int) (C),\ + (__v4si)(__m128i)_mm_undefined_si128 (),\ + (__mmask8)-1)) + +#define _mm512_mask_extracti32x4_epi32(W, U, X, C) \ + ((__m128i) __builtin_ia32_extracti32x4_mask ((__v16si)(__m512i) (X), \ + (int) (C),\ + (__v4si)(__m128i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_extracti32x4_epi32(U, X, C) \ + ((__m128i) __builtin_ia32_extracti32x4_mask ((__v16si)(__m512i) (X), \ + (int) (C),\ + (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_inserti32x4 (__m512i __A, __m128i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti32x4_mask ((__v16si) __A, + (__v4si) __B, + __imm, + (__v16si) __A, -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_insertf32x4 (__m512 __A, __m128 __B, const int __imm) +{ + return (__m512) __builtin_ia32_insertf32x4_mask ((__v16sf) __A, + (__v4sf) __B, + __imm, + (__v16sf) __A, -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_inserti64x4 (__m512i __A, __m256i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti64x4_mask ((__v8di) __A, + (__v4di) __B, + __imm, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_inserti64x4 (__m512i __W, __mmask8 __U, __m512i __A, + __m256i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti64x4_mask ((__v8di) __A, + (__v4di) __B, + __imm, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_inserti64x4 (__mmask8 __U, __m512i __A, __m256i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_inserti64x4_mask ((__v8di) __A, + (__v4di) __B, + __imm, + 
(__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_insertf64x4 (__m512d __A, __m256d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_insertf64x4_mask ((__v8df) __A, + (__v4df) __B, + __imm, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_insertf64x4 (__m512d __W, __mmask8 __U, __m512d __A, + __m256d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_insertf64x4_mask ((__v8df) __A, + (__v4df) __B, + __imm, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_insertf64x4 (__mmask8 __U, __m512d __A, __m256d __B, + const int __imm) +{ + return (__m512d) __builtin_ia32_insertf64x4_mask ((__v8df) __A, + (__v4df) __B, + __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} +#else +#define _mm512_insertf32x4(X, Y, C) \ + ((__m512) __builtin_ia32_insertf32x4_mask ((__v16sf)(__m512) (X), \ + (__v4sf)(__m128) (Y), (int) (C), (__v16sf)(__m512) (X), (__mmask16)(-1))) + +#define _mm512_inserti32x4(X, Y, C) \ + ((__m512i) __builtin_ia32_inserti32x4_mask ((__v16si)(__m512i) (X), \ + (__v4si)(__m128i) (Y), (int) (C), (__v16si)(__m512i) (X), (__mmask16)(-1))) + +#define _mm512_insertf64x4(X, Y, C) \ + ((__m512d) __builtin_ia32_insertf64x4_mask ((__v8df)(__m512d) (X), \ + (__v4df)(__m256d) (Y), (int) (C), \ + (__v8df)(__m512d)_mm512_undefined_pd(), \ + (__mmask8)-1)) + +#define _mm512_mask_insertf64x4(W, U, X, Y, C) \ + ((__m512d) __builtin_ia32_insertf64x4_mask ((__v8df)(__m512d) (X), \ + (__v4df)(__m256d) (Y), (int) (C), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_insertf64x4(U, X, Y, C) \ + ((__m512d) __builtin_ia32_insertf64x4_mask ((__v8df)(__m512d) (X), \ + (__v4df)(__m256d) (Y), (int) (C), \ + 
(__v8df)(__m512d)_mm512_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm512_inserti64x4(X, Y, C) \ + ((__m512i) __builtin_ia32_inserti64x4_mask ((__v8di)(__m512i) (X), \ + (__v4di)(__m256i) (Y), (int) (C), \ + (__v8di)(__m512i)_mm512_undefined_epi32 (), \ + (__mmask8)-1)) + +#define _mm512_mask_inserti64x4(W, U, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti64x4_mask ((__v8di)(__m512i) (X), \ + (__v4di)(__m256i) (Y), (int) (C),\ + (__v8di)(__m512i)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_inserti64x4(U, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti64x4_mask ((__v8di)(__m512i) (X), \ + (__v4di)(__m256i) (Y), (int) (C), \ + (__v8di)(__m512i)_mm512_setzero_si512 (), \ + (__mmask8)(U))) +#endif + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_pd (void const *__P) +{ + return *(__m512d_u *)__P; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_pd (void *__P, __m512d __A) +{ + *(__m512d_u *)__P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_storeu_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_storeupd512_mask ((double *) __P, (__v8df) __A, + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_ps (void const *__P) +{ + return *(__m512_u 
*)__P; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_loadu_ps (__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_ps (void *__P, __m512 __A) +{ + *(__m512_u *)__P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_storeu_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeups512_mask ((float *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float *__P) +{ + return (__m128) __builtin_ia32_loadss_mask (__P, (__v4sf) __W, __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_ss (__mmask8 __U, const float *__P) +{ + return (__m128) __builtin_ia32_loadss_mask (__P, (__v4sf) _mm_setzero_ps (), + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double *__P) +{ + return (__m128d) __builtin_ia32_loadsd_mask (__P, (__v2df) __W, __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_sd (__mmask8 __U, const double *__P) +{ + return (__m128d) __builtin_ia32_loadsd_mask (__P, (__v2df) _mm_setzero_pd (), + __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movess_mask ((__v4sf) __A, (__v4sf) __B, + (__v4sf) __W, __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movess_mask ((__v4sf) __A, (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movesd_mask ((__v2df) __A, (__v2df) __B, + (__v2df) __W, __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movesd_mask ((__v2df) __A, (__v2df) __B, + (__v2df) _mm_setzero_pd (), + __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_store_ss (float *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storess_mask (__P, (__v4sf) __A, (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_store_sd (double *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storesd_mask (__P, (__v2df) __A, (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_epi64 (void const *__P) +{ + return *(__m512i_u *) __P; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_epi64 (void *__P, __m512i __A) +{ + *(__m512i_u *) __P = (__m512i_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_storeu_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_storedqudi512_mask ((long long *) __P, (__v8di) __A, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_si512 (void const *__P) +{ + return *(__m512i_u *)__P; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_epi32 (void const *__P) +{ + return *(__m512i_u *) __P; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_loadu_epi32 (__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_si512 (void *__P, __m512i __A) +{ + *(__m512i_u *)__P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_epi32 (void *__P, __m512i __A) +{ + *(__m512i_u *) __P = (__m512i_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_storeu_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_storedqusi512_mask ((int *) __P, (__v16si) __A, + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutevar_pd (__m512d __A, __m512i __C) +{ + return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A, + (__v8di) __C, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutevar_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A, + (__v8di) __C, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutevar_pd (__mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A, + (__v8di) __C, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutevar_ps (__m512 __A, __m512i __C) +{ + return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A, + (__v16si) __C, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutevar_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A, + (__v16si) __C, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutevar_ps (__mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A, + (__v16si) __C, + (__v16sf) + _mm512_setzero_ps (), + 
(__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_epi64 (__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I + /* idx */ , + (__v8di) __A, + (__v8di) __B, + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex2var_epi64 (__m512i __A, __mmask8 __U, __m512i __I, + __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I + /* idx */ , + (__v8di) __A, + (__v8di) __B, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask2_permutex2var_epi64 (__m512i __A, __m512i __I, + __mmask8 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2varq512_mask ((__v8di) __A, + (__v8di) __I + /* idx */ , + (__v8di) __B, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex2var_epi64 (__mmask8 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varq512_maskz ((__v8di) __I + /* idx */ , + (__v8di) __A, + (__v8di) __B, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_epi32 (__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I + /* idx */ , + (__v16si) __A, + (__v16si) __B, + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex2var_epi32 (__m512i __A, __mmask16 __U, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I + /* idx */ , + (__v16si) __A, + (__v16si) __B, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I, + __mmask16 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2vard512_mask ((__v16si) __A, + (__v16si) __I + /* idx */ , + (__v16si) __B, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex2var_epi32 (__mmask16 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2vard512_maskz ((__v16si) __I + /* idx */ , + (__v16si) __A, + (__v16si) __B, + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_pd (__m512d __A, __m512i __I, __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I + /* idx */ , + (__v8df) __A, + (__v8df) __B, + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex2var_pd (__m512d __A, __mmask8 __U, __m512i __I, + __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I + /* idx */ , + (__v8df) __A, + (__v8df) __B, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U, + __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermi2varpd512_mask ((__v8df) __A, + (__v8di) __I + /* idx */ , + (__v8df) __B, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex2var_pd (__mmask8 __U, __m512d __A, __m512i __I, + __m512d __B) +{ + return (__m512d) __builtin_ia32_vpermt2varpd512_maskz ((__v8di) __I + /* idx */ , + (__v8df) __A, + (__v8df) __B, + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_ps (__m512 __A, 
__m512i __I, __m512 __B) +{ + return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I + /* idx */ , + (__v16sf) __A, + (__v16sf) __B, + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex2var_ps (__m512 __A, __mmask16 __U, __m512i __I, __m512 __B) +{ + return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I + /* idx */ , + (__v16sf) __A, + (__v16sf) __B, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask2_permutex2var_ps (__m512 __A, __m512i __I, __mmask16 __U, + __m512 __B) +{ + return (__m512) __builtin_ia32_vpermi2varps512_mask ((__v16sf) __A, + (__v16si) __I + /* idx */ , + (__v16sf) __B, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex2var_ps (__mmask16 __U, __m512 __A, __m512i __I, + __m512 __B) +{ + return (__m512) __builtin_ia32_vpermt2varps512_maskz ((__v16si) __I + /* idx */ , + (__v16sf) __A, + (__v16sf) __B, + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permute_pd (__m512d __X, const int __C) +{ + return (__m512d) __builtin_ia32_vpermilpd512_mask ((__v8df) __X, __C, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permute_pd (__m512d __W, __mmask8 __U, __m512d __X, const int __C) +{ + return (__m512d) __builtin_ia32_vpermilpd512_mask ((__v8df) __X, __C, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permute_pd (__mmask8 __U, __m512d __X, const int __C) +{ + return (__m512d) __builtin_ia32_vpermilpd512_mask ((__v8df) __X, __C, + (__v8df) + _mm512_setzero_pd (), + 
(__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permute_ps (__m512 __X, const int __C) +{ + return (__m512) __builtin_ia32_vpermilps512_mask ((__v16sf) __X, __C, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permute_ps (__m512 __W, __mmask16 __U, __m512 __X, const int __C) +{ + return (__m512) __builtin_ia32_vpermilps512_mask ((__v16sf) __X, __C, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permute_ps (__mmask16 __U, __m512 __X, const int __C) +{ + return (__m512) __builtin_ia32_vpermilps512_mask ((__v16sf) __X, __C, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} +#else +#define _mm512_permute_pd(X, C) \ + ((__m512d) __builtin_ia32_vpermilpd512_mask ((__v8df)(__m512d)(X), (int)(C), \ + (__v8df)(__m512d)_mm512_undefined_pd(),\ + (__mmask8)(-1))) + +#define _mm512_mask_permute_pd(W, U, X, C) \ + ((__m512d) __builtin_ia32_vpermilpd512_mask ((__v8df)(__m512d)(X), (int)(C), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_permute_pd(U, X, C) \ + ((__m512d) __builtin_ia32_vpermilpd512_mask ((__v8df)(__m512d)(X), (int)(C), \ + (__v8df)(__m512d)_mm512_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm512_permute_ps(X, C) \ + ((__m512) __builtin_ia32_vpermilps512_mask ((__v16sf)(__m512)(X), (int)(C), \ + (__v16sf)(__m512)_mm512_undefined_ps(),\ + (__mmask16)(-1))) + +#define _mm512_mask_permute_ps(W, U, X, C) \ + ((__m512) __builtin_ia32_vpermilps512_mask ((__v16sf)(__m512)(X), (int)(C), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U))) + +#define _mm512_maskz_permute_ps(U, X, C) \ + ((__m512) __builtin_ia32_vpermilps512_mask ((__v16sf)(__m512)(X), (int)(C), \ + (__v16sf)(__m512)_mm512_setzero_ps(), \ + (__mmask16)(U))) +#endif + +#ifdef __OPTIMIZE__ 
+extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex_epi64 (__m512i __X, const int __I) +{ + return (__m512i) __builtin_ia32_permdi512_mask ((__v8di) __X, __I, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) (-1)); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex_epi64 (__m512i __W, __mmask8 __M, + __m512i __X, const int __I) +{ + return (__m512i) __builtin_ia32_permdi512_mask ((__v8di) __X, __I, + (__v8di) __W, + (__mmask8) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex_epi64 (__mmask8 __M, __m512i __X, const int __I) +{ + return (__m512i) __builtin_ia32_permdi512_mask ((__v8di) __X, __I, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex_pd (__m512d __X, const int __M) +{ + return (__m512d) __builtin_ia32_permdf512_mask ((__v8df) __X, __M, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex_pd (__m512d __W, __mmask8 __U, __m512d __X, const int __M) +{ + return (__m512d) __builtin_ia32_permdf512_mask ((__v8df) __X, __M, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex_pd (__mmask8 __U, __m512d __X, const int __M) +{ + return (__m512d) __builtin_ia32_permdf512_mask ((__v8df) __X, __M, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} +#else +#define _mm512_permutex_pd(X, M) \ + ((__m512d) __builtin_ia32_permdf512_mask ((__v8df)(__m512d)(X), (int)(M), \ + (__v8df)(__m512d)_mm512_undefined_pd(),\ + (__mmask8)-1)) + +#define _mm512_mask_permutex_pd(W, U, X, M) \ + ((__m512d) 
__builtin_ia32_permdf512_mask ((__v8df)(__m512d)(X), (int)(M), \ + (__v8df)(__m512d)(W), (__mmask8)(U))) + +#define _mm512_maskz_permutex_pd(U, X, M) \ + ((__m512d) __builtin_ia32_permdf512_mask ((__v8df)(__m512d)(X), (int)(M), \ + (__v8df)(__m512d)_mm512_setzero_pd(),\ + (__mmask8)(U))) + +#define _mm512_permutex_epi64(X, I) \ + ((__m512i) __builtin_ia32_permdi512_mask ((__v8di)(__m512i)(X), \ + (int)(I), \ + (__v8di)(__m512i) \ + (_mm512_undefined_epi32 ()),\ + (__mmask8)(-1))) + +#define _mm512_maskz_permutex_epi64(M, X, I) \ + ((__m512i) __builtin_ia32_permdi512_mask ((__v8di)(__m512i)(X), \ + (int)(I), \ + (__v8di)(__m512i) \ + (_mm512_setzero_si512 ()),\ + (__mmask8)(M))) + +#define _mm512_mask_permutex_epi64(W, M, X, I) \ + ((__m512i) __builtin_ia32_permdi512_mask ((__v8di)(__m512i)(X), \ + (int)(I), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(M))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y, + (__v8di) __X, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y, + (__v8di) __X, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y, + (__v8di) __X, + (__v8di) __W, + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvarsi512_mask 
((__v16si) __Y, + (__v16si) __X, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y, + (__v16si) __X, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y, + (__v16si) __X, + (__v16si) __W, + __M); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_pd (__m512i __X, __m512d __Y) +{ + return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y, + (__v8di) __X, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y, + (__v8di) __X, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y, + (__v8di) __X, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_ps (__m512i __X, __m512 __Y) +{ + return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y, + (__v16si) __X, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutexvar_ps 
(__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y, + (__v16si) __X, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y, + (__v16si) __X, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_ps (__m512 __M, __m512 __V, const int __imm) +{ + return (__m512) __builtin_ia32_shufps512_mask ((__v16sf) __M, + (__v16sf) __V, __imm, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_ps (__m512 __W, __mmask16 __U, __m512 __M, + __m512 __V, const int __imm) +{ + return (__m512) __builtin_ia32_shufps512_mask ((__v16sf) __M, + (__v16sf) __V, __imm, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_ps (__mmask16 __U, __m512 __M, __m512 __V, const int __imm) +{ + return (__m512) __builtin_ia32_shufps512_mask ((__v16sf) __M, + (__v16sf) __V, __imm, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shuffle_pd (__m512d __M, __m512d __V, const int __imm) +{ + return (__m512d) __builtin_ia32_shufpd512_mask ((__v8df) __M, + (__v8df) __V, __imm, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shuffle_pd (__m512d __W, __mmask8 __U, __m512d __M, + __m512d __V, const int __imm) +{ + return (__m512d) 
__builtin_ia32_shufpd512_mask ((__v8df) __M, + (__v8df) __V, __imm, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shuffle_pd (__mmask8 __U, __m512d __M, __m512d __V, + const int __imm) +{ + return (__m512d) __builtin_ia32_shufpd512_mask ((__v8df) __M, + (__v8df) __V, __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fixupimm_round_pd (__m512d __A, __m512d __B, __m512i __C, + const int __imm, const int __R) +{ + return (__m512d) __builtin_ia32_fixupimmpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8di) __C, + __imm, + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fixupimm_round_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512i __C, const int __imm, const int __R) +{ + return (__m512d) __builtin_ia32_fixupimmpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8di) __C, + __imm, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fixupimm_round_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512i __C, const int __imm, const int __R) +{ + return (__m512d) __builtin_ia32_fixupimmpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8di) __C, + __imm, + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fixupimm_round_ps (__m512 __A, __m512 __B, __m512i __C, + const int __imm, const int __R) +{ + return (__m512) __builtin_ia32_fixupimmps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16si) __C, + __imm, + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fixupimm_round_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512i __C, const 
int __imm, const int __R) +{ + return (__m512) __builtin_ia32_fixupimmps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16si) __C, + __imm, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fixupimm_round_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512i __C, const int __imm, const int __R) +{ + return (__m512) __builtin_ia32_fixupimmps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16si) __C, + __imm, + (__mmask16) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fixupimm_round_sd (__m128d __A, __m128d __B, __m128i __C, + const int __imm, const int __R) +{ + return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, __imm, + (__mmask8) -1, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fixupimm_round_sd (__m128d __A, __mmask8 __U, __m128d __B, + __m128i __C, const int __imm, const int __R) +{ + return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, __imm, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fixupimm_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + __m128i __C, const int __imm, const int __R) +{ + return (__m128d) __builtin_ia32_fixupimmsd_maskz ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, + __imm, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fixupimm_round_ss (__m128 __A, __m128 __B, __m128i __C, + const int __imm, const int __R) +{ + return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, __imm, + (__mmask8) -1, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fixupimm_round_ss (__m128 
__A, __mmask8 __U, __m128 __B, + __m128i __C, const int __imm, const int __R) +{ + return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, __imm, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fixupimm_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + __m128i __C, const int __imm, const int __R) +{ + return (__m128) __builtin_ia32_fixupimmss_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, __imm, + (__mmask8) __U, __R); +} + +#else +#define _mm512_shuffle_pd(X, Y, C) \ + ((__m512d)__builtin_ia32_shufpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(C),\ + (__v8df)(__m512d)_mm512_undefined_pd(),\ + (__mmask8)-1)) + +#define _mm512_mask_shuffle_pd(W, U, X, Y, C) \ + ((__m512d)__builtin_ia32_shufpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(C),\ + (__v8df)(__m512d)(W),\ + (__mmask8)(U))) + +#define _mm512_maskz_shuffle_pd(U, X, Y, C) \ + ((__m512d)__builtin_ia32_shufpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(C),\ + (__v8df)(__m512d)_mm512_setzero_pd(),\ + (__mmask8)(U))) + +#define _mm512_shuffle_ps(X, Y, C) \ + ((__m512)__builtin_ia32_shufps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(C),\ + (__v16sf)(__m512)_mm512_undefined_ps(),\ + (__mmask16)-1)) + +#define _mm512_mask_shuffle_ps(W, U, X, Y, C) \ + ((__m512)__builtin_ia32_shufps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(C),\ + (__v16sf)(__m512)(W),\ + (__mmask16)(U))) + +#define _mm512_maskz_shuffle_ps(U, X, Y, C) \ + ((__m512)__builtin_ia32_shufps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(C),\ + (__v16sf)(__m512)_mm512_setzero_ps(),\ + (__mmask16)(U))) + +#define _mm512_fixupimm_round_pd(X, Y, Z, C, R) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), (int)(C), \ + (__mmask8)(-1), (R))) + +#define 
_mm512_mask_fixupimm_round_pd(X, U, Y, Z, C, R) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), (int)(C), \ + (__mmask8)(U), (R))) + +#define _mm512_maskz_fixupimm_round_pd(U, X, Y, Z, C, R) \ + ((__m512d)__builtin_ia32_fixupimmpd512_maskz ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), (int)(C), \ + (__mmask8)(U), (R))) + +#define _mm512_fixupimm_round_ps(X, Y, Z, C, R) \ + ((__m512)__builtin_ia32_fixupimmps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \ + (__mmask16)(-1), (R))) + +#define _mm512_mask_fixupimm_round_ps(X, U, Y, Z, C, R) \ + ((__m512)__builtin_ia32_fixupimmps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \ + (__mmask16)(U), (R))) + +#define _mm512_maskz_fixupimm_round_ps(U, X, Y, Z, C, R) \ + ((__m512)__builtin_ia32_fixupimmps512_maskz ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \ + (__mmask16)(U), (R))) + +#define _mm_fixupimm_round_sd(X, Y, Z, C, R) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(-1), (R))) + +#define _mm_mask_fixupimm_round_sd(X, U, Y, Z, C, R) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), (R))) + +#define _mm_maskz_fixupimm_round_sd(U, X, Y, Z, C, R) \ + ((__m128d)__builtin_ia32_fixupimmsd_maskz ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), (R))) + +#define _mm_fixupimm_round_ss(X, Y, Z, C, R) \ + ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(-1), (R))) + +#define _mm_mask_fixupimm_round_ss(X, U, Y, Z, C, R) \ + ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \ + 
(__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), (R))) + +#define _mm_maskz_fixupimm_round_ss(U, X, Y, Z, C, R) \ + ((__m128)__builtin_ia32_fixupimmss_maskz ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), (R))) +#endif + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movehdup_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_movshdup512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_movshdup512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_movshdup512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_moveldup_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_movsldup512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_movsldup512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_movsldup512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_or_si512 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A | (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_or_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A | (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_or_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pord512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_or_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pord512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_or_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A | (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_or_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_or_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_xor_si512 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A ^ (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_xor_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A ^ (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_xor_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pxord512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_xor_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pxord512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_xor_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A ^ (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_xor_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_xor_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rol_epi32 (__m512i __A, const int __B) +{ + return (__m512i) __builtin_ia32_prold512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rol_epi32 (__m512i __W, __mmask16 __U, __m512i __A, const int __B) +{ + return (__m512i) 
__builtin_ia32_prold512_mask ((__v16si) __A, __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rol_epi32 (__mmask16 __U, __m512i __A, const int __B) +{ + return (__m512i) __builtin_ia32_prold512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_ror_epi32 (__m512i __A, int __B) +{ + return (__m512i) __builtin_ia32_prord512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_ror_epi32 (__m512i __W, __mmask16 __U, __m512i __A, int __B) +{ + return (__m512i) __builtin_ia32_prord512_mask ((__v16si) __A, __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_ror_epi32 (__mmask16 __U, __m512i __A, int __B) +{ + return (__m512i) __builtin_ia32_prord512_mask ((__v16si) __A, __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rol_epi64 (__m512i __A, const int __B) +{ + return (__m512i) __builtin_ia32_prolq512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rol_epi64 (__m512i __W, __mmask8 __U, __m512i __A, const int __B) +{ + return (__m512i) __builtin_ia32_prolq512_mask ((__v8di) __A, __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rol_epi64 (__mmask8 __U, __m512i __A, const int __B) +{ + return (__m512i) 
__builtin_ia32_prolq512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_ror_epi64 (__m512i __A, int __B) +{ + return (__m512i) __builtin_ia32_prorq512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_ror_epi64 (__m512i __W, __mmask8 __U, __m512i __A, int __B) +{ + return (__m512i) __builtin_ia32_prorq512_mask ((__v8di) __A, __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_ror_epi64 (__mmask8 __U, __m512i __A, int __B) +{ + return (__m512i) __builtin_ia32_prorq512_mask ((__v8di) __A, __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#else +#define _mm512_rol_epi32(A, B) \ + ((__m512i)__builtin_ia32_prold512_mask ((__v16si)(__m512i)(A), \ + (int)(B), \ + (__v16si)_mm512_undefined_epi32 (), \ + (__mmask16)(-1))) +#define _mm512_mask_rol_epi32(W, U, A, B) \ + ((__m512i)__builtin_ia32_prold512_mask ((__v16si)(__m512i)(A), \ + (int)(B), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U))) +#define _mm512_maskz_rol_epi32(U, A, B) \ + ((__m512i)__builtin_ia32_prold512_mask ((__v16si)(__m512i)(A), \ + (int)(B), \ + (__v16si)_mm512_setzero_si512 (), \ + (__mmask16)(U))) +#define _mm512_ror_epi32(A, B) \ + ((__m512i)__builtin_ia32_prord512_mask ((__v16si)(__m512i)(A), \ + (int)(B), \ + (__v16si)_mm512_undefined_epi32 (), \ + (__mmask16)(-1))) +#define _mm512_mask_ror_epi32(W, U, A, B) \ + ((__m512i)__builtin_ia32_prord512_mask ((__v16si)(__m512i)(A), \ + (int)(B), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U))) +#define _mm512_maskz_ror_epi32(U, A, B) \ + ((__m512i)__builtin_ia32_prord512_mask ((__v16si)(__m512i)(A), \ + (int)(B), \ + (__v16si)_mm512_setzero_si512 (), \ + 
(__mmask16)(U))) +#define _mm512_rol_epi64(A, B) \ + ((__m512i)__builtin_ia32_prolq512_mask ((__v8di)(__m512i)(A), \ + (int)(B), \ + (__v8di)_mm512_undefined_epi32 (), \ + (__mmask8)(-1))) +#define _mm512_mask_rol_epi64(W, U, A, B) \ + ((__m512i)__builtin_ia32_prolq512_mask ((__v8di)(__m512i)(A), \ + (int)(B), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U))) +#define _mm512_maskz_rol_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_prolq512_mask ((__v8di)(__m512i)(A), \ + (int)(B), \ + (__v8di)_mm512_setzero_si512 (), \ + (__mmask8)(U))) + +#define _mm512_ror_epi64(A, B) \ + ((__m512i)__builtin_ia32_prorq512_mask ((__v8di)(__m512i)(A), \ + (int)(B), \ + (__v8di)_mm512_undefined_epi32 (), \ + (__mmask8)(-1))) +#define _mm512_mask_ror_epi64(W, U, A, B) \ + ((__m512i)__builtin_ia32_prorq512_mask ((__v8di)(__m512i)(A), \ + (int)(B), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U))) +#define _mm512_maskz_ror_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_prorq512_mask ((__v8di)(__m512i)(A), \ + (int)(B), \ + (__v8di)_mm512_setzero_si512 (), \ + (__mmask8)(U))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_and_si512 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A & (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_and_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A & (__v16su) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_and_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_and_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandd512_mask ((__v16si) __A, + 
(__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_and_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A & (__v8du) __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_and_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_and_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_pd (), + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_andnot_si512 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_andnot_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_andnot_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_andnot_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + 
_mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_andnot_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_andnot_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_andnot_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_pd (), + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_test_epi32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A, + (__v16si) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestmd512 ((__v16si) __A, + (__v16si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_test_epi64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A, + (__v8di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq512 ((__v8di) __A, (__v8di) __B, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_testn_epi32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A, + (__v16si) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmd512 ((__v16si) __A, + (__v16si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_testn_epi64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A, + (__v8di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq512 ((__v8di) __A, + (__v8di) __B, __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_ps (__m512 __A) +{ + return (__m512) _mm512_and_epi32 ((__m512i) __A, + _mm512_set1_epi32 (0x7fffffff)); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_abs_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) _mm512_mask_and_epi32 ((__m512i) __W, __U, (__m512i) __A, + _mm512_set1_epi32 (0x7fffffff)); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_pd (__m512d __A) +{ + return (__m512d) _mm512_and_epi64 ((__m512i) __A, + _mm512_set1_epi64 (0x7fffffffffffffffLL)); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_abs_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) + _mm512_mask_and_epi64 ((__m512i) __W, __U, (__m512i) __A, + _mm512_set1_epi64 (0x7fffffffffffffffLL)); +} + +extern __inline __m512i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpackhi_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhdq512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpackhi_epi32 (__m512i __W, __mmask16 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhdq512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpackhi_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhdq512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpackhi_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhqdq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpackhi_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhqdq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpackhi_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckhqdq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpacklo_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) 
__builtin_ia32_punpckldq512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpacklo_epi32 (__m512i __W, __mmask16 __U, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckldq512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpacklo_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpckldq512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpacklo_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklqdq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklqdq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_punpcklqdq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __x86_64__ +#ifdef __OPTIMIZE__ +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundss_u64 (__m128 __A, const int __R) +{ + return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) __A, __R); +} + +extern __inline long long +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundss_si64 (__m128 __A, const int __R) +{ + return (long long) __builtin_ia32_vcvtss2si64 ((__v4sf) __A, __R); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundss_i64 (__m128 __A, const int __R) +{ + return (long long) __builtin_ia32_vcvtss2si64 ((__v4sf) __A, __R); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundss_u64 (__m128 __A, const int __R) +{ + return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) __A, __R); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundss_i64 (__m128 __A, const int __R) +{ + return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, __R); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundss_si64 (__m128 __A, const int __R) +{ + return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, __R); +} +#else +#define _mm_cvt_roundss_u64(A, B) \ + ((unsigned long long)__builtin_ia32_vcvtss2usi64(A, B)) + +#define _mm_cvt_roundss_si64(A, B) \ + ((long long)__builtin_ia32_vcvtss2si64(A, B)) + +#define _mm_cvt_roundss_i64(A, B) \ + ((long long)__builtin_ia32_vcvtss2si64(A, B)) + +#define _mm_cvtt_roundss_u64(A, B) \ + ((unsigned long long)__builtin_ia32_vcvttss2usi64(A, B)) + +#define _mm_cvtt_roundss_i64(A, B) \ + ((long long)__builtin_ia32_vcvttss2si64(A, B)) + +#define _mm_cvtt_roundss_si64(A, B) \ + ((long long)__builtin_ia32_vcvttss2si64(A, B)) +#endif +#endif + +#ifdef __OPTIMIZE__ +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundss_u32 (__m128 __A, const int __R) +{ + return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_cvt_roundss_si32 (__m128 __A, const int __R) +{ + return (int) __builtin_ia32_vcvtss2si32 ((__v4sf) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundss_i32 (__m128 __A, const int __R) +{ + return (int) __builtin_ia32_vcvtss2si32 ((__v4sf) __A, __R); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundss_u32 (__m128 __A, const int __R) +{ + return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundss_i32 (__m128 __A, const int __R) +{ + return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundss_si32 (__m128 __A, const int __R) +{ + return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, __R); +} +#else +#define _mm_cvt_roundss_u32(A, B) \ + ((unsigned)__builtin_ia32_vcvtss2usi32(A, B)) + +#define _mm_cvt_roundss_si32(A, B) \ + ((int)__builtin_ia32_vcvtss2si32(A, B)) + +#define _mm_cvt_roundss_i32(A, B) \ + ((int)__builtin_ia32_vcvtss2si32(A, B)) + +#define _mm_cvtt_roundss_u32(A, B) \ + ((unsigned)__builtin_ia32_vcvttss2usi32(A, B)) + +#define _mm_cvtt_roundss_si32(A, B) \ + ((int)__builtin_ia32_vcvttss2si32(A, B)) + +#define _mm_cvtt_roundss_i32(A, B) \ + ((int)__builtin_ia32_vcvttss2si32(A, B)) +#endif + +#ifdef __x86_64__ +#ifdef __OPTIMIZE__ +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_u64 (__m128d __A, const int __R) +{ + return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) __A, __R); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_si64 (__m128d __A, const int __R) +{ + return (long long) __builtin_ia32_vcvtsd2si64 ((__v2df) __A, __R); 
+} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_i64 (__m128d __A, const int __R) +{ + return (long long) __builtin_ia32_vcvtsd2si64 ((__v2df) __A, __R); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsd_u64 (__m128d __A, const int __R) +{ + return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) __A, __R); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsd_si64 (__m128d __A, const int __R) +{ + return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, __R); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsd_i64 (__m128d __A, const int __R) +{ + return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, __R); +} +#else +#define _mm_cvt_roundsd_u64(A, B) \ + ((unsigned long long)__builtin_ia32_vcvtsd2usi64(A, B)) + +#define _mm_cvt_roundsd_si64(A, B) \ + ((long long)__builtin_ia32_vcvtsd2si64(A, B)) + +#define _mm_cvt_roundsd_i64(A, B) \ + ((long long)__builtin_ia32_vcvtsd2si64(A, B)) + +#define _mm_cvtt_roundsd_u64(A, B) \ + ((unsigned long long)__builtin_ia32_vcvttsd2usi64(A, B)) + +#define _mm_cvtt_roundsd_si64(A, B) \ + ((long long)__builtin_ia32_vcvttsd2si64(A, B)) + +#define _mm_cvtt_roundsd_i64(A, B) \ + ((long long)__builtin_ia32_vcvttsd2si64(A, B)) +#endif +#endif + +#ifdef __OPTIMIZE__ +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_u32 (__m128d __A, const int __R) +{ + return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_si32 (__m128d __A, const int __R) +{ + return (int) __builtin_ia32_vcvtsd2si32 ((__v2df) __A, __R); +} + +extern __inline int +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_i32 (__m128d __A, const int __R) +{ + return (int) __builtin_ia32_vcvtsd2si32 ((__v2df) __A, __R); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsd_u32 (__m128d __A, const int __R) +{ + return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsd_i32 (__m128d __A, const int __R) +{ + return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsd_si32 (__m128d __A, const int __R) +{ + return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, __R); +} +#else +#define _mm_cvt_roundsd_u32(A, B) \ + ((unsigned)__builtin_ia32_vcvtsd2usi32(A, B)) + +#define _mm_cvt_roundsd_si32(A, B) \ + ((int)__builtin_ia32_vcvtsd2si32(A, B)) + +#define _mm_cvt_roundsd_i32(A, B) \ + ((int)__builtin_ia32_vcvtsd2si32(A, B)) + +#define _mm_cvtt_roundsd_u32(A, B) \ + ((unsigned)__builtin_ia32_vcvttsd2usi32(A, B)) + +#define _mm_cvtt_roundsd_si32(A, B) \ + ((int)__builtin_ia32_vcvttsd2si32(A, B)) + +#define _mm_cvtt_roundsd_i32(A, B) \ + ((int)__builtin_ia32_vcvttsd2si32(A, B)) +#endif + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_movedup_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_movddup512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_movddup512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_movedup_pd (__mmask8 
__U, __m512d __A) +{ + return (__m512d) __builtin_ia32_movddup512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpacklo_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_unpcklpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpacklo_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_unpcklpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_unpcklpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpackhi_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_unpckhpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpackhi_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_unpckhpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpackhi_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_unpckhpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_unpackhi_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_unpckhps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpackhi_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_unpckhps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_unpckhps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundps_pd (__m256 __A, const int __R) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundps_pd (__m512d __W, __mmask8 __U, __m256 __A, + const int __R) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundps_pd (__mmask8 __U, __m256 __A, const int __R) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_ps (__m256i __A, const int __R) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern 
__inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_ps (__m512 __W, __mmask16 __U, __m256i __A, + const int __R) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) __W, + (__mmask16) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_ps (__mmask16 __U, __m256i __A, const int __R) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundps_ph (__m512 __A, const int __I) +{ + return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, + __I, + (__v16hi) + _mm256_undefined_si256 (), + -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtps_ph (__m512 __A, const int __I) +{ + return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, + __I, + (__v16hi) + _mm256_undefined_si256 (), + -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundps_ph (__m256i __U, __mmask16 __W, __m512 __A, + const int __I) +{ + return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, + __I, + (__v16hi) __U, + (__mmask16) __W); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtps_ph (__m256i __U, __mmask16 __W, __m512 __A, const int __I) +{ + return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, + __I, + (__v16hi) __U, + (__mmask16) __W); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundps_ph (__mmask16 __W, __m512 __A, const int __I) +{ + return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, + __I, + (__v16hi) + 
_mm256_setzero_si256 (), + (__mmask16) __W); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtps_ph (__mmask16 __W, __m512 __A, const int __I) +{ + return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, + __I, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __W); +} +#else +#define _mm512_cvt_roundps_pd(A, B) \ + (__m512d)__builtin_ia32_cvtps2pd512_mask(A, (__v8df)_mm512_undefined_pd(), -1, B) + +#define _mm512_mask_cvt_roundps_pd(W, U, A, B) \ + (__m512d)__builtin_ia32_cvtps2pd512_mask(A, (__v8df)(W), U, B) + +#define _mm512_maskz_cvt_roundps_pd(U, A, B) \ + (__m512d)__builtin_ia32_cvtps2pd512_mask(A, (__v8df)_mm512_setzero_pd(), U, B) + +#define _mm512_cvt_roundph_ps(A, B) \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)_mm512_undefined_ps(), -1, B) + +#define _mm512_mask_cvt_roundph_ps(W, U, A, B) \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)(W), U, B) + +#define _mm512_maskz_cvt_roundph_ps(U, A, B) \ + (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)_mm512_setzero_ps(), U, B) + +#define _mm512_cvt_roundps_ph(A, I) \ + ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\ + (__v16hi)_mm256_undefined_si256 (), -1)) +#define _mm512_cvtps_ph(A, I) \ + ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\ + (__v16hi)_mm256_undefined_si256 (), -1)) +#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \ + ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\ + (__v16hi)(__m256i)(U), (__mmask16) (W))) +#define _mm512_mask_cvtps_ph(U, W, A, I) \ + ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\ + (__v16hi)(__m256i)(U), (__mmask16) (W))) +#define _mm512_maskz_cvt_roundps_ph(W, A, I) \ + ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\ + (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W))) +#define 
_mm512_maskz_cvtps_ph(W, A, I) \ + ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\ + (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundpd_ps (__m512d __A, const int __R) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) + _mm256_undefined_ps (), + (__mmask8) -1, __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundpd_ps (__m256 __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundpd_ps (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_ss (__m128 __A, __m128d __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtsd2ss_round ((__v4sf) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvt_roundsd_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128d __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A, + (__v2df) __B, + (__v4sf) __W, + __U, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvt_roundsd_ss (__mmask8 __U, __m128 __A, + __m128d __B, const int __R) +{ + return (__m128) __builtin_ia32_cvtsd2ss_mask_round ((__v4sf) __A, + (__v2df) __B, + _mm_setzero_ps (), + __U, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_cvt_roundss_sd (__m128d __A, __m128 __B, const int __R) +{ + return (__m128d) __builtin_ia32_cvtss2sd_round ((__v2df) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvt_roundss_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128 __B, const int __R) +{ + return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A, + (__v4sf) __B, + (__v2df) __W, + __U, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvt_roundss_sd (__mmask8 __U, __m128d __A, + __m128 __B, const int __R) +{ + return (__m128d) __builtin_ia32_cvtss2sd_mask_round ((__v2df) __A, + (__v4sf) __B, + _mm_setzero_pd (), + __U, + __R); +} +#else +#define _mm512_cvt_roundpd_ps(A, B) \ + (__m256)__builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_undefined_ps(), -1, B) + +#define _mm512_mask_cvt_roundpd_ps(W, U, A, B) \ + (__m256)__builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)(W), U, B) + +#define _mm512_maskz_cvt_roundpd_ps(U, A, B) \ + (__m256)__builtin_ia32_cvtpd2ps512_mask(A, (__v8sf)_mm256_setzero_ps(), U, B) + +#define _mm_cvt_roundsd_ss(A, B, C) \ + (__m128)__builtin_ia32_cvtsd2ss_round(A, B, C) + +#define _mm_mask_cvt_roundsd_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), (W), (U), (C)) + +#define _mm_maskz_cvt_roundsd_ss(U, A, B, C) \ + (__m128)__builtin_ia32_cvtsd2ss_mask_round ((A), (B), _mm_setzero_ps (), \ + (U), (C)) + +#define _mm_cvt_roundss_sd(A, B, C) \ + (__m128d)__builtin_ia32_cvtss2sd_round(A, B, C) + +#define _mm_mask_cvt_roundss_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), (W), (U), (C)) + +#define _mm_maskz_cvt_roundss_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_cvtss2sd_mask_round ((A), (B), _mm_setzero_pd (), \ + (U), (C)) + +#endif + +#define _mm_mask_cvtss_sd(W, U, A, B) \ + _mm_mask_cvt_roundss_sd ((W), (U), (A), (B), 
_MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_cvtss_sd(U, A, B) \ + _mm_maskz_cvt_roundss_sd ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_cvtsd_ss(W, U, A, B) \ + _mm_mask_cvt_roundsd_ss ((W), (U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_cvtsd_ss(U, A, B) \ + _mm_maskz_cvt_roundsd_ss ((U), (A), (B), _MM_FROUND_CUR_DIRECTION) + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_stream_si512 (__m512i * __P, __m512i __A) +{ + __builtin_ia32_movntdq512 ((__v8di *) __P, (__v8di) __A); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_stream_ps (float *__P, __m512 __A) +{ + __builtin_ia32_movntps512 (__P, (__v16sf) __A); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_stream_pd (double *__P, __m512d __A) +{ + __builtin_ia32_movntpd512 (__P, (__v8df) __A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_stream_load_si512 (void *__P) +{ + return __builtin_ia32_movntdqa512 ((__v8di *)__P); +} + +/* Constants for mantissa extraction */ +typedef enum +{ + _MM_MANT_NORM_1_2, /* interval [1, 2) */ + _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */ + _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */ + _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */ +} _MM_MANTISSA_NORM_ENUM; + +typedef enum +{ + _MM_MANT_SIGN_src, /* sign = sign(SRC) */ + _MM_MANT_SIGN_zero, /* sign = 0 */ + _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */ +} _MM_MANTISSA_SIGN_ENUM; + +#ifdef __OPTIMIZE__ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_getexpss128_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_round_ss 
(__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_getexpsd128_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getexp_round_ps (__m512 __A, const int __R) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getexp_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + const int __R) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, 
__R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getexp_round_ps (__mmask16 __U, __m512 __A, const int __R) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getexp_round_pd (__m512d __A, const int __R) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getexp_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + const int __R) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getexp_round_pd (__mmask8 __U, __m512d __A, const int __R) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getmant_round_pd (__m512d __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512d) __builtin_ia32_getmantpd512_mask ((__v8df) __A, + (__C << 2) | __B, + _mm512_undefined_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getmant_round_pd (__m512d __W, __mmask8 __U, __m512d __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512d) __builtin_ia32_getmantpd512_mask ((__v8df) __A, + (__C << 2) | __B, + (__v8df) __W, __U, + __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_getmant_round_pd (__mmask8 __U, __m512d __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512d) __builtin_ia32_getmantpd512_mask ((__v8df) __A, + (__C << 2) | __B, + (__v8df) + _mm512_setzero_pd (), + __U, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getmant_round_ps (__m512 __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512) __builtin_ia32_getmantps512_mask ((__v16sf) __A, + (__C << 2) | __B, + _mm512_undefined_ps (), + (__mmask16) -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getmant_round_ps (__m512 __W, __mmask16 __U, __m512 __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512) __builtin_ia32_getmantps512_mask ((__v16sf) __A, + (__C << 2) | __B, + (__v16sf) __W, __U, + __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getmant_round_ps (__mmask16 __U, __m512 __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512) __builtin_ia32_getmantps512_mask ((__v16sf) __A, + (__C << 2) | __B, + (__v16sf) + _mm512_setzero_ps (), + __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_round_sd (__m128d __A, __m128d __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128d) __builtin_ia32_getmantsd_round ((__v2df) __A, + (__v2df) __B, + (__D << 2) | __C, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128d) 
__builtin_ia32_getmantsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__D << 2) | __C, + (__v2df) __W, + __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__D << 2) | __C, + (__v2df) + _mm_setzero_pd(), + __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_round_ss (__m128 __A, __m128 __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128) __builtin_ia32_getmantss_round ((__v4sf) __A, + (__v4sf) __B, + (__D << 2) | __C, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__D << 2) | __C, + (__v4sf) __W, + __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__D << 2) | __C, + (__v4sf) + _mm_setzero_ps(), + __U, __R); +} + +#else +#define _mm512_getmant_round_pd(X, B, C, R) \ + ((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)_mm512_undefined_pd(), \ + (__mmask8)-1,\ + (R))) + +#define _mm512_mask_getmant_round_pd(W, U, X, B, C, R) \ + ((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \ + (int)(((C)<<2) | 
(B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U),\ + (R))) + +#define _mm512_maskz_getmant_round_pd(U, X, B, C, R) \ + ((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)_mm512_setzero_pd(), \ + (__mmask8)(U),\ + (R))) +#define _mm512_getmant_round_ps(X, B, C, R) \ + ((__m512)__builtin_ia32_getmantps512_mask ((__v16sf)(__m512)(X), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)_mm512_undefined_ps(), \ + (__mmask16)-1,\ + (R))) + +#define _mm512_mask_getmant_round_ps(W, U, X, B, C, R) \ + ((__m512)__builtin_ia32_getmantps512_mask ((__v16sf)(__m512)(X), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U),\ + (R))) + +#define _mm512_maskz_getmant_round_ps(U, X, B, C, R) \ + ((__m512)__builtin_ia32_getmantps512_mask ((__v16sf)(__m512)(X), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)_mm512_setzero_ps(), \ + (__mmask16)(U),\ + (R))) +#define _mm_getmant_round_sd(X, Y, C, D, R) \ + ((__m128d)__builtin_ia32_getmantsd_round ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(((D)<<2) | (C)), \ + (R))) + +#define _mm_mask_getmant_round_sd(W, U, X, Y, C, D, R) \ + ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U),\ + (R))) + +#define _mm_maskz_getmant_round_sd(U, X, Y, C, D, R) \ + ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)_mm_setzero_pd(), \ + (__mmask8)(U),\ + (R))) + +#define _mm_getmant_round_ss(X, Y, C, D, R) \ + ((__m128)__builtin_ia32_getmantss_round ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(((D)<<2) | (C)), \ + (R))) + +#define _mm_mask_getmant_round_ss(W, U, X, Y, C, D, R) \ + ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U),\ + (R))) 
+ +#define _mm_maskz_getmant_round_ss(U, X, Y, C, D, R) \ + ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)_mm_setzero_ps(), \ + (__mmask8)(U),\ + (R))) + +#define _mm_getexp_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), R)) + +#define _mm_mask_getexp_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_getexpss_mask_round(A, B, W, U, C) + +#define _mm_maskz_getexp_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + +#define _mm_getexp_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), R)) + +#define _mm_mask_getexp_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_getexp_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + + +#define _mm512_getexp_round_ps(A, R) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, R)) + +#define _mm512_mask_getexp_round_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(W), (__mmask16)(U), R)) + +#define _mm512_maskz_getexp_round_ps(U, A, R) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), R)) + +#define _mm512_getexp_round_pd(A, R) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), (__mmask8)-1, R)) + +#define _mm512_mask_getexp_round_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(W), (__mmask8)(U), R)) + +#define _mm512_maskz_getexp_round_pd(U, A, R) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), (__mmask8)(U), R)) 
+#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_roundscale_round_ps (__m512 __A, const int __imm, const int __R) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, __imm, + (__v16sf) + _mm512_undefined_ps (), + -1, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_roundscale_round_ps (__m512 __A, __mmask16 __B, __m512 __C, + const int __imm, const int __R) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __C, __imm, + (__v16sf) __A, + (__mmask16) __B, __R); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_roundscale_round_ps (__mmask16 __A, __m512 __B, + const int __imm, const int __R) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __B, + __imm, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __A, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_roundscale_round_pd (__m512d __A, const int __imm, const int __R) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, __imm, + (__v8df) + _mm512_undefined_pd (), + -1, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_roundscale_round_pd (__m512d __A, __mmask8 __B, + __m512d __C, const int __imm, const int __R) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __C, __imm, + (__v8df) __A, + (__mmask8) __B, __R); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_roundscale_round_pd (__mmask8 __A, __m512d __B, + const int __imm, const int __R) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __B, + __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __A, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_roundscale_round_ss (__m128 __A, __m128 __B, const int __imm, + const int __R) +{ + return (__m128) + __builtin_ia32_rndscaless_mask_round ((__v4sf) __A, + (__v4sf) __B, __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_round_ss (__m128 __A, __mmask8 __B, __m128 __C, + __m128 __D, const int __imm, const int __R) +{ + return (__m128) + __builtin_ia32_rndscaless_mask_round ((__v4sf) __C, + (__v4sf) __D, __imm, + (__v4sf) __A, + (__mmask8) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_round_ss (__mmask8 __A, __m128 __B, __m128 __C, + const int __imm, const int __R) +{ + return (__m128) + __builtin_ia32_rndscaless_mask_round ((__v4sf) __B, + (__v4sf) __C, __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __A, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_round_sd (__m128d __A, __m128d __B, const int __imm, + const int __R) +{ + return (__m128d) + __builtin_ia32_rndscalesd_mask_round ((__v2df) __A, + (__v2df) __B, __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_round_sd (__m128d __A, __mmask8 __B, __m128d __C, + __m128d __D, const int __imm, const int __R) +{ + return (__m128d) + __builtin_ia32_rndscalesd_mask_round ((__v2df) __C, + (__v2df) __D, __imm, + (__v2df) __A, + (__mmask8) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_round_sd (__mmask8 __A, __m128d __B, __m128d __C, + const int __imm, const int __R) +{ + return (__m128d) + __builtin_ia32_rndscalesd_mask_round ((__v2df) __B, + (__v2df) __C, __imm, + (__v2df) 
+ _mm_setzero_pd (), + (__mmask8) __A, + __R); +} + +#else +#define _mm512_roundscale_round_ps(A, B, R) \ + ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(A), (int)(B),\ + (__v16sf)_mm512_undefined_ps(), (__mmask16)(-1), R)) +#define _mm512_mask_roundscale_round_ps(A, B, C, D, R) \ + ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(C), \ + (int)(D), \ + (__v16sf)(__m512)(A), \ + (__mmask16)(B), R)) +#define _mm512_maskz_roundscale_round_ps(A, B, C, R) \ + ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(B), \ + (int)(C), \ + (__v16sf)_mm512_setzero_ps(),\ + (__mmask16)(A), R)) +#define _mm512_roundscale_round_pd(A, B, R) \ + ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(A), (int)(B),\ + (__v8df)_mm512_undefined_pd(), (__mmask8)(-1), R)) +#define _mm512_mask_roundscale_round_pd(A, B, C, D, R) \ + ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(C), \ + (int)(D), \ + (__v8df)(__m512d)(A), \ + (__mmask8)(B), R)) +#define _mm512_maskz_roundscale_round_pd(A, B, C, R) \ + ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(B), \ + (int)(C), \ + (__v8df)_mm512_setzero_pd(),\ + (__mmask8)(A), R)) +#define _mm_roundscale_round_ss(A, B, I, R) \ + ((__m128) \ + __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \ + (__v4sf) (__m128) (B), \ + (int) (I), \ + (__v4sf) _mm_setzero_ps (), \ + (__mmask8) (-1), \ + (int) (R))) +#define _mm_mask_roundscale_round_ss(A, U, B, C, I, R) \ + ((__m128) \ + __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (B), \ + (__v4sf) (__m128) (C), \ + (int) (I), \ + (__v4sf) (__m128) (A), \ + (__mmask8) (U), \ + (int) (R))) +#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \ + ((__m128) \ + __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \ + (__v4sf) (__m128) (B), \ + (int) (I), \ + (__v4sf) _mm_setzero_ps (), \ + (__mmask8) (U), \ + (int) (R))) +#define _mm_roundscale_round_sd(A, B, I, R) \ + ((__m128d) \ + __builtin_ia32_rndscalesd_mask_round 
((__v2df) (__m128d) (A), \ + (__v2df) (__m128d) (B), \ + (int) (I), \ + (__v2df) _mm_setzero_pd (), \ + (__mmask8) (-1), \ + (int) (R))) +#define _mm_mask_roundscale_round_sd(A, U, B, C, I, R) \ + ((__m128d) \ + __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (B), \ + (__v2df) (__m128d) (C), \ + (int) (I), \ + (__v2df) (__m128d) (A), \ + (__mmask8) (U), \ + (int) (R))) +#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \ + ((__m128d) \ + __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \ + (__v2df) (__m128d) (B), \ + (int) (I), \ + (__v2df) _mm_setzero_pd (), \ + (__mmask8) (U), \ + (int) (R))) +#endif + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_floor_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_floor_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_ceil_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_ceil_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __A, -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + 
+extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_alignr_epi32 (__m512i __A, __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_alignd512_mask ((__v16si) __A, + (__v16si) __B, __imm, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_alignr_epi32 (__m512i __W, __mmask16 __U, __m512i __A, + __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_alignd512_mask ((__v16si) __A, + (__v16si) __B, __imm, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_alignr_epi32 (__mmask16 __U, __m512i __A, __m512i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_alignd512_mask ((__v16si) __A, + (__v16si) __B, __imm, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_alignr_epi64 (__m512i __A, __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_alignq512_mask ((__v8di) __A, + (__v8di) __B, __imm, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_alignr_epi64 (__m512i __W, __mmask8 __U, __m512i __A, + __m512i __B, const int __imm) +{ + return (__m512i) __builtin_ia32_alignq512_mask ((__v8di) __A, + (__v8di) __B, __imm, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_alignr_epi64 (__mmask8 __U, __m512i __A, __m512i __B, + const int __imm) +{ + return (__m512i) __builtin_ia32_alignq512_mask ((__v8di) __A, + (__v8di) __B, __imm, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} +#else +#define _mm512_alignr_epi32(X, Y, C) \ + ((__m512i)__builtin_ia32_alignd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(C), (__v16si)_mm512_undefined_epi32 (),\ + (__mmask16)-1)) + +#define _mm512_mask_alignr_epi32(W, U, X, Y, C) \ + ((__m512i)__builtin_ia32_alignd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(C), (__v16si)(__m512i)(W), \ + (__mmask16)(U))) + +#define _mm512_maskz_alignr_epi32(U, X, Y, C) \ + ((__m512i)__builtin_ia32_alignd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(C), (__v16si)_mm512_setzero_si512 (),\ + (__mmask16)(U))) + +#define _mm512_alignr_epi64(X, Y, C) \ + ((__m512i)__builtin_ia32_alignq512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(C), (__v8di)_mm512_undefined_epi32 (), \ + (__mmask8)-1)) + +#define _mm512_mask_alignr_epi64(W, U, X, Y, C) \ + ((__m512i)__builtin_ia32_alignq512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(C), (__v8di)(__m512i)(W), (__mmask8)(U))) + +#define _mm512_maskz_alignr_epi64(U, X, Y, C) \ + ((__m512i)__builtin_ia32_alignq512_mask ((__v8di)(__m512i)(X), \ + 
(__v8di)(__m512i)(Y), (int)(C), (__v8di)_mm512_setzero_si512 (),\ + (__mmask8)(U))) +#endif + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epi32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_pcmpeqd512_mask ((__v16si) __A, + (__v16si) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_pcmpeqd512_mask ((__v16si) __A, + (__v16si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqq512_mask ((__v8di) __A, + (__v8di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epi64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqq512_mask ((__v8di) __A, + (__v8di) __B, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epi32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_pcmpgtd512_mask ((__v16si) __A, + (__v16si) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_pcmpgtd512_mask ((__v16si) __A, + (__v16si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtq512_mask ((__v8di) __A, + (__v8di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_cmpgt_epi64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtq512_mask ((__v8di) __A, + (__v8di) __B, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epi32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 5, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epi32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 5, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epu32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 5, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epu32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 5, + (__mmask16) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epi64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epi64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpge_epu64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) 
__builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpge_epu64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epi32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 2, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epi32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 2, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epu32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 2, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epu32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 2, + (__mmask16) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epi64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epi64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_epu64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_epu64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epi32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 1, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epi32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 1, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epu32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 1, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epu32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 1, + (__mmask16) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epi64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epi64_mask (__m512i __X, __m512i __Y) +{ 
+ return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_epu64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_epu64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epi32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 4, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epi32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, 4, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epu32_mask (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 4, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epu32_mask (__m512i __X, __m512i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, 4, + (__mmask16) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epi64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 4, + (__mmask8) __M); +} 
+ +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epi64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_epu64_mask (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_epu64_mask (__m512i __X, __m512i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, 4, + (__mmask8) -1); +} + +#define _MM_CMPINT_EQ 0x0 +#define _MM_CMPINT_LT 0x1 +#define _MM_CMPINT_LE 0x2 +#define _MM_CMPINT_UNUSED 0x3 +#define _MM_CMPINT_NE 0x4 +#define _MM_CMPINT_NLT 0x5 +#define _MM_CMPINT_GE 0x5 +#define _MM_CMPINT_NLE 0x6 +#define _MM_CMPINT_GT 0x6 + +#ifdef __OPTIMIZE__ +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftli_mask16 (__mmask16 __A, unsigned int __B) +{ + return (__mmask16) __builtin_ia32_kshiftlihi ((__mmask16) __A, + (__mmask8) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kshiftri_mask16 (__mmask16 __A, unsigned int __B) +{ + return (__mmask16) __builtin_ia32_kshiftrihi ((__mmask16) __A, + (__mmask8) __B); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epi64_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epi32_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask16) 
__builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, __P, + (__mmask16) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epu64_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_epu32_mask (__m512i __X, __m512i __Y, const int __P) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, __P, + (__mmask16) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_round_pd_mask (__m512d __X, __m512d __Y, const int __P, + const int __R) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, __P, + (__mmask8) -1, __R); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_round_ps_mask (__m512 __X, __m512 __Y, const int __P, const int __R) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, __P, + (__mmask16) -1, __R); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epi64_mask (__mmask8 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpq512_mask ((__v8di) __X, + (__v8di) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epi32_mask (__mmask16 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask16) __builtin_ia32_cmpd512_mask ((__v16si) __X, + (__v16si) __Y, __P, + (__mmask16) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epu64_mask (__mmask8 __U, __m512i __X, __m512i __Y, + const int 
__P) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __X, + (__v8di) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_epu32_mask (__mmask16 __U, __m512i __X, __m512i __Y, + const int __P) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __X, + (__v16si) __Y, __P, + (__mmask16) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_round_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y, + const int __P, const int __R) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, __P, + (__mmask8) __U, __R); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_round_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y, + const int __P, const int __R) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, __P, + (__mmask16) __U, __R); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_round_sd_mask (__m128d __X, __m128d __Y, const int __P, const int __R) +{ + return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X, + (__v2df) __Y, __P, + (__mmask8) -1, __R); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_round_sd_mask (__mmask8 __M, __m128d __X, __m128d __Y, + const int __P, const int __R) +{ + return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X, + (__v2df) __Y, __P, + (__mmask8) __M, __R); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_round_ss_mask (__m128 __X, __m128 __Y, const int __P, const int __R) +{ + return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X, + (__v4sf) __Y, __P, + (__mmask8) -1, __R); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_cmp_round_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, + const int __P, const int __R) +{ + return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X, + (__v4sf) __Y, __P, + (__mmask8) __M, __R); +} + +#else +#define _kshiftli_mask16(X, Y) \ + ((__mmask16) __builtin_ia32_kshiftlihi ((__mmask16)(X), (__mmask8)(Y))) + +#define _kshiftri_mask16(X, Y) \ + ((__mmask16) __builtin_ia32_kshiftrihi ((__mmask16)(X), (__mmask8)(Y))) + +#define _mm512_cmp_epi64_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm512_cmp_epi32_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(P), \ + (__mmask16)-1)) + +#define _mm512_cmp_epu64_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm512_cmp_epu32_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(P), \ + (__mmask16)-1)) + +#define _mm512_cmp_round_pd_mask(X, Y, P, R) \ + ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(P),\ + (__mmask8)-1, R)) + +#define _mm512_cmp_round_ps_mask(X, Y, P, R) \ + ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(P),\ + (__mmask16)-1, R)) + +#define _mm512_mask_cmp_epi64_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm512_mask_cmp_epi32_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(P), \ + (__mmask16)(M))) + +#define _mm512_mask_cmp_epu64_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di)(__m512i)(X), \ + (__v8di)(__m512i)(Y), (int)(P),\ + 
(__mmask8)(M))) + +#define _mm512_mask_cmp_epu32_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si)(__m512i)(X), \ + (__v16si)(__m512i)(Y), (int)(P), \ + (__mmask16)(M))) + +#define _mm512_mask_cmp_round_pd_mask(M, X, Y, P, R) \ + ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(P),\ + (__mmask8)(M), R)) + +#define _mm512_mask_cmp_round_ps_mask(M, X, Y, P, R) \ + ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(P),\ + (__mmask16)(M), R)) + +#define _mm_cmp_round_sd_mask(X, Y, P, R) \ + ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P),\ + (__mmask8)-1, R)) + +#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \ + ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P),\ + (M), R)) + +#define _mm_cmp_round_ss_mask(X, Y, P, R) \ + ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1, R)) + +#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \ + ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (M), R)) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32gather_ps (__m512i __index, void const *__addr, int __scale) +{ + __m512 __v1_old = _mm512_undefined_ps (); + __mmask16 __mask = 0xFFFF; + + return (__m512) __builtin_ia32_gathersiv16sf ((__v16sf) __v1_old, + __addr, + (__v16si) __index, + __mask, __scale); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32gather_ps (__m512 __v1_old, __mmask16 __mask, + __m512i __index, void const *__addr, int __scale) +{ + return (__m512) __builtin_ia32_gathersiv16sf ((__v16sf) __v1_old, + __addr, + (__v16si) __index, + __mask, __scale); +} + +extern __inline __m512d +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32gather_pd (__m256i __index, void const *__addr, int __scale) +{ + __m512d __v1_old = _mm512_undefined_pd (); + __mmask8 __mask = 0xFF; + + return (__m512d) __builtin_ia32_gathersiv8df ((__v8df) __v1_old, + __addr, + (__v8si) __index, __mask, + __scale); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32gather_pd (__m512d __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, int __scale) +{ + return (__m512d) __builtin_ia32_gathersiv8df ((__v8df) __v1_old, + __addr, + (__v8si) __index, + __mask, __scale); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64gather_ps (__m512i __index, void const *__addr, int __scale) +{ + __m256 __v1_old = _mm256_undefined_ps (); + __mmask8 __mask = 0xFF; + + return (__m256) __builtin_ia32_gatherdiv16sf ((__v8sf) __v1_old, + __addr, + (__v8di) __index, __mask, + __scale); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64gather_ps (__m256 __v1_old, __mmask8 __mask, + __m512i __index, void const *__addr, int __scale) +{ + return (__m256) __builtin_ia32_gatherdiv16sf ((__v8sf) __v1_old, + __addr, + (__v8di) __index, + __mask, __scale); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64gather_pd (__m512i __index, void const *__addr, int __scale) +{ + __m512d __v1_old = _mm512_undefined_pd (); + __mmask8 __mask = 0xFF; + + return (__m512d) __builtin_ia32_gatherdiv8df ((__v8df) __v1_old, + __addr, + (__v8di) __index, __mask, + __scale); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64gather_pd (__m512d __v1_old, __mmask8 __mask, + __m512i __index, void const *__addr, int __scale) +{ + return (__m512d) __builtin_ia32_gatherdiv8df ((__v8df) __v1_old, + 
__addr, + (__v8di) __index, + __mask, __scale); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32gather_epi32 (__m512i __index, void const *__addr, int __scale) +{ + __m512i __v1_old = _mm512_undefined_epi32 (); + __mmask16 __mask = 0xFFFF; + + return (__m512i) __builtin_ia32_gathersiv16si ((__v16si) __v1_old, + __addr, + (__v16si) __index, + __mask, __scale); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32gather_epi32 (__m512i __v1_old, __mmask16 __mask, + __m512i __index, void const *__addr, int __scale) +{ + return (__m512i) __builtin_ia32_gathersiv16si ((__v16si) __v1_old, + __addr, + (__v16si) __index, + __mask, __scale); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32gather_epi64 (__m256i __index, void const *__addr, int __scale) +{ + __m512i __v1_old = _mm512_undefined_epi32 (); + __mmask8 __mask = 0xFF; + + return (__m512i) __builtin_ia32_gathersiv8di ((__v8di) __v1_old, + __addr, + (__v8si) __index, __mask, + __scale); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32gather_epi64 (__m512i __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m512i) __builtin_ia32_gathersiv8di ((__v8di) __v1_old, + __addr, + (__v8si) __index, + __mask, __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64gather_epi32 (__m512i __index, void const *__addr, int __scale) +{ + __m256i __v1_old = _mm256_undefined_si256 (); + __mmask8 __mask = 0xFF; + + return (__m256i) __builtin_ia32_gatherdiv16si ((__v8si) __v1_old, + __addr, + (__v8di) __index, + __mask, __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64gather_epi32 (__m256i __v1_old, __mmask8 
__mask, + __m512i __index, void const *__addr, int __scale) +{ + return (__m256i) __builtin_ia32_gatherdiv16si ((__v8si) __v1_old, + __addr, + (__v8di) __index, + __mask, __scale); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64gather_epi64 (__m512i __index, void const *__addr, int __scale) +{ + __m512i __v1_old = _mm512_undefined_epi32 (); + __mmask8 __mask = 0xFF; + + return (__m512i) __builtin_ia32_gatherdiv8di ((__v8di) __v1_old, + __addr, + (__v8di) __index, __mask, + __scale); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64gather_epi64 (__m512i __v1_old, __mmask8 __mask, + __m512i __index, void const *__addr, + int __scale) +{ + return (__m512i) __builtin_ia32_gatherdiv8di ((__v8di) __v1_old, + __addr, + (__v8di) __index, + __mask, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32scatter_ps (void *__addr, __m512i __index, __m512 __v1, int __scale) +{ + __builtin_ia32_scattersiv16sf (__addr, (__mmask16) 0xFFFF, + (__v16si) __index, (__v16sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32scatter_ps (void *__addr, __mmask16 __mask, + __m512i __index, __m512 __v1, int __scale) +{ + __builtin_ia32_scattersiv16sf (__addr, __mask, (__v16si) __index, + (__v16sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32scatter_pd (void *__addr, __m256i __index, __m512d __v1, + int __scale) +{ + __builtin_ia32_scattersiv8df (__addr, (__mmask8) 0xFF, + (__v8si) __index, (__v8df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32scatter_pd (void *__addr, __mmask8 __mask, + __m256i __index, __m512d __v1, int __scale) +{ + __builtin_ia32_scattersiv8df 
(__addr, __mask, (__v8si) __index, + (__v8df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64scatter_ps (void *__addr, __m512i __index, __m256 __v1, int __scale) +{ + __builtin_ia32_scatterdiv16sf (__addr, (__mmask8) 0xFF, + (__v8di) __index, (__v8sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64scatter_ps (void *__addr, __mmask8 __mask, + __m512i __index, __m256 __v1, int __scale) +{ + __builtin_ia32_scatterdiv16sf (__addr, __mask, (__v8di) __index, + (__v8sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64scatter_pd (void *__addr, __m512i __index, __m512d __v1, + int __scale) +{ + __builtin_ia32_scatterdiv8df (__addr, (__mmask8) 0xFF, + (__v8di) __index, (__v8df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64scatter_pd (void *__addr, __mmask8 __mask, + __m512i __index, __m512d __v1, int __scale) +{ + __builtin_ia32_scatterdiv8df (__addr, __mask, (__v8di) __index, + (__v8df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32scatter_epi32 (void *__addr, __m512i __index, + __m512i __v1, int __scale) +{ + __builtin_ia32_scattersiv16si (__addr, (__mmask16) 0xFFFF, + (__v16si) __index, (__v16si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32scatter_epi32 (void *__addr, __mmask16 __mask, + __m512i __index, __m512i __v1, int __scale) +{ + __builtin_ia32_scattersiv16si (__addr, __mask, (__v16si) __index, + (__v16si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i32scatter_epi64 (void *__addr, __m256i __index, + __m512i __v1, int 
__scale) +{ + __builtin_ia32_scattersiv8di (__addr, (__mmask8) 0xFF, + (__v8si) __index, (__v8di) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i32scatter_epi64 (void *__addr, __mmask8 __mask, + __m256i __index, __m512i __v1, int __scale) +{ + __builtin_ia32_scattersiv8di (__addr, __mask, (__v8si) __index, + (__v8di) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64scatter_epi32 (void *__addr, __m512i __index, + __m256i __v1, int __scale) +{ + __builtin_ia32_scatterdiv16si (__addr, (__mmask8) 0xFF, + (__v8di) __index, (__v8si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64scatter_epi32 (void *__addr, __mmask8 __mask, + __m512i __index, __m256i __v1, int __scale) +{ + __builtin_ia32_scatterdiv16si (__addr, __mask, (__v8di) __index, + (__v8si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_i64scatter_epi64 (void *__addr, __m512i __index, + __m512i __v1, int __scale) +{ + __builtin_ia32_scatterdiv8di (__addr, (__mmask8) 0xFF, + (__v8di) __index, (__v8di) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_i64scatter_epi64 (void *__addr, __mmask8 __mask, + __m512i __index, __m512i __v1, int __scale) +{ + __builtin_ia32_scatterdiv8di (__addr, __mask, (__v8di) __index, + (__v8di) __v1, __scale); +} +#else +#define _mm512_i32gather_ps(INDEX, ADDR, SCALE) \ + (__m512) __builtin_ia32_gathersiv16sf ((__v16sf)_mm512_undefined_ps(),\ + (void const *) (ADDR), \ + (__v16si)(__m512i) (INDEX), \ + (__mmask16)0xFFFF, \ + (int) (SCALE)) + +#define _mm512_mask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m512) __builtin_ia32_gathersiv16sf ((__v16sf)(__m512) (V1OLD), \ + (void const *) (ADDR), \ + 
(__v16si)(__m512i) (INDEX), \ + (__mmask16) (MASK), \ + (int) (SCALE)) + +#define _mm512_i32gather_pd(INDEX, ADDR, SCALE) \ + (__m512d) __builtin_ia32_gathersiv8df ((__v8df)_mm512_undefined_pd(), \ + (void const *) (ADDR), \ + (__v8si)(__m256i) (INDEX), \ + (__mmask8)0xFF, (int) (SCALE)) + +#define _mm512_mask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m512d) __builtin_ia32_gathersiv8df ((__v8df)(__m512d) (V1OLD), \ + (void const *) (ADDR), \ + (__v8si)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm512_i64gather_ps(INDEX, ADDR, SCALE) \ + (__m256) __builtin_ia32_gatherdiv16sf ((__v8sf)_mm256_undefined_ps(), \ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8)0xFF, (int) (SCALE)) + +#define _mm512_mask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256) __builtin_ia32_gatherdiv16sf ((__v8sf)(__m256) (V1OLD), \ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm512_i64gather_pd(INDEX, ADDR, SCALE) \ + (__m512d) __builtin_ia32_gatherdiv8df ((__v8df)_mm512_undefined_pd(), \ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8)0xFF, (int) (SCALE)) + +#define _mm512_mask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m512d) __builtin_ia32_gatherdiv8df ((__v8df)(__m512d) (V1OLD), \ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm512_i32gather_epi32(INDEX, ADDR, SCALE) \ + (__m512i) __builtin_ia32_gathersiv16si ((__v16si)_mm512_undefined_epi32 (),\ + (void const *) (ADDR), \ + (__v16si)(__m512i) (INDEX), \ + (__mmask16)0xFFFF, \ + (int) (SCALE)) + +#define _mm512_mask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m512i) __builtin_ia32_gathersiv16si ((__v16si)(__m512i) (V1OLD), \ + (void const *) (ADDR), \ + (__v16si)(__m512i) (INDEX), \ + (__mmask16) (MASK), \ + (int) (SCALE)) + +#define _mm512_i32gather_epi64(INDEX, ADDR, SCALE) \ + (__m512i) 
__builtin_ia32_gathersiv8di ((__v8di)_mm512_undefined_epi32 (),\ + (void const *) (ADDR), \ + (__v8si)(__m256i) (INDEX), \ + (__mmask8)0xFF, (int) (SCALE)) + +#define _mm512_mask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m512i) __builtin_ia32_gathersiv8di ((__v8di)(__m512i) (V1OLD), \ + (void const *) (ADDR), \ + (__v8si)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm512_i64gather_epi32(INDEX, ADDR, SCALE) \ + (__m256i) __builtin_ia32_gatherdiv16si ((__v8si)_mm256_undefined_si256(),\ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8)0xFF, (int) (SCALE)) + +#define _mm512_mask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256i) __builtin_ia32_gatherdiv16si ((__v8si)(__m256i) (V1OLD), \ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm512_i64gather_epi64(INDEX, ADDR, SCALE) \ + (__m512i) __builtin_ia32_gatherdiv8di ((__v8di)_mm512_undefined_epi32 (),\ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8)0xFF, (int) (SCALE)) + +#define _mm512_mask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m512i) __builtin_ia32_gatherdiv8di ((__v8di)(__m512i) (V1OLD), \ + (void const *) (ADDR), \ + (__v8di)(__m512i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm512_i32scatter_ps(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv16sf ((void *) (ADDR), (__mmask16)0xFFFF, \ + (__v16si)(__m512i) (INDEX), \ + (__v16sf)(__m512) (V1), (int) (SCALE)) + +#define _mm512_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv16sf ((void *) (ADDR), (__mmask16) (MASK), \ + (__v16si)(__m512i) (INDEX), \ + (__v16sf)(__m512) (V1), (int) (SCALE)) + +#define _mm512_i32scatter_pd(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8df ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8si)(__m256i) (INDEX), \ + (__v8df)(__m512d) (V1), (int) (SCALE)) + +#define _mm512_mask_i32scatter_pd(ADDR, MASK, INDEX, 
V1, SCALE) \ + __builtin_ia32_scattersiv8df ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8si)(__m256i) (INDEX), \ + (__v8df)(__m512d) (V1), (int) (SCALE)) + +#define _mm512_i64scatter_ps(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv16sf ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8di)(__m512i) (INDEX), \ + (__v8sf)(__m256) (V1), (int) (SCALE)) + +#define _mm512_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv16sf ((void *) (ADDR), (__mmask16) (MASK), \ + (__v8di)(__m512i) (INDEX), \ + (__v8sf)(__m256) (V1), (int) (SCALE)) + +#define _mm512_i64scatter_pd(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8df ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8di)(__m512i) (INDEX), \ + (__v8df)(__m512d) (V1), (int) (SCALE)) + +#define _mm512_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8df ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8di)(__m512i) (INDEX), \ + (__v8df)(__m512d) (V1), (int) (SCALE)) + +#define _mm512_i32scatter_epi32(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv16si ((void *) (ADDR), (__mmask16)0xFFFF, \ + (__v16si)(__m512i) (INDEX), \ + (__v16si)(__m512i) (V1), (int) (SCALE)) + +#define _mm512_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv16si ((void *) (ADDR), (__mmask16) (MASK), \ + (__v16si)(__m512i) (INDEX), \ + (__v16si)(__m512i) (V1), (int) (SCALE)) + +#define _mm512_i32scatter_epi64(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8di ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8si)(__m256i) (INDEX), \ + (__v8di)(__m512i) (V1), (int) (SCALE)) + +#define _mm512_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8di ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8si)(__m256i) (INDEX), \ + (__v8di)(__m512i) (V1), (int) (SCALE)) + +#define _mm512_i64scatter_epi32(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv16si ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8di)(__m512i) (INDEX), \ + (__v8si)(__m256i) (V1), 
(int) (SCALE)) + +#define _mm512_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv16si ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8di)(__m512i) (INDEX), \ + (__v8si)(__m256i) (V1), (int) (SCALE)) + +#define _mm512_i64scatter_epi64(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8di ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8di)(__m512i) (INDEX), \ + (__v8di)(__m512i) (V1), (int) (SCALE)) + +#define _mm512_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8di ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8di)(__m512i) (INDEX), \ + (__v8di)(__m512i) (V1), (int) (SCALE)) +#endif + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A, + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + 
(__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_maskz ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expandloadu_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expandloadu_pd (__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_maskz ((const __v8df *) __P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_maskz ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expandloadu_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_expandloadu_ps (__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_maskz ((const __v16sf *) __P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expand_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_maskz ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expandloadu_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m512i) + __builtin_ia32_expandloaddi512_maskz ((const __v8di *) __P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_maskz ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_mask_expandloadu_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *) __P, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expandloadu_epi32 (__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_maskz ((const __v16si *) __P, + (__v16si) + _mm512_setzero_si512 + (), (__mmask16) __U); +} + +/* Mask arithmetic operations */ +#define _kand_mask16 _mm512_kand +#define _kandn_mask16 _mm512_kandn +#define _knot_mask16 _mm512_knot +#define _kor_mask16 _mm512_kor +#define _kxnor_mask16 _mm512_kxnor +#define _kxor_mask16 _mm512_kxor + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortest_mask16_u8 (__mmask16 __A, __mmask16 __B, unsigned char *__CF) +{ + *__CF = (unsigned char) __builtin_ia32_kortestchi (__A, __B); + return (unsigned char) __builtin_ia32_kortestzhi (__A, __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestz_mask16_u8 (__mmask16 __A, __mmask16 __B) +{ + return (unsigned char) __builtin_ia32_kortestzhi ((__mmask16) __A, + (__mmask16) __B); +} + +extern __inline unsigned char +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kortestc_mask16_u8 (__mmask16 __A, __mmask16 __B) +{ + return (unsigned char) __builtin_ia32_kortestchi ((__mmask16) __A, + (__mmask16) __B); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtmask16_u32 (__mmask16 __A) +{ + return (unsigned int) __builtin_ia32_kmovw ((__mmask16 ) __A); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_cvtu32_mask16 (unsigned int __A) +{ + return (__mmask16) __builtin_ia32_kmovw ((__mmask16 ) __A); +} + +extern __inline 
__mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_load_mask16 (__mmask16 *__A) +{ + return (__mmask16) __builtin_ia32_kmovw (*(__mmask16 *) __A); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_store_mask16 (__mmask16 *__A, __mmask16 __B) +{ + *(__mmask16 *) __A = __builtin_ia32_kmovw (__B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kand (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kandn (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, + (__mmask16) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kortestz (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kortestzhi ((__mmask16) __A, + (__mmask16) __B); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kortestc (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kortestchi ((__mmask16) __A, + (__mmask16) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kxnor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kxor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B); +} + +extern __inline 
__mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_knot (__mmask16 __A) +{ + return (__mmask16) __builtin_ia32_knothi ((__mmask16) __A); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kunpackb (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_kunpackb_mask16 (__mmask8 __A, __mmask8 __B) +{ + return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_inserti32x4 (__mmask16 __B, __m512i __C, __m128i __D, + const int __imm) +{ + return (__m512i) __builtin_ia32_inserti32x4_mask ((__v16si) __C, + (__v4si) __D, + __imm, + (__v16si) + _mm512_setzero_si512 (), + __B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_insertf32x4 (__mmask16 __B, __m512 __C, __m128 __D, + const int __imm) +{ + return (__m512) __builtin_ia32_insertf32x4_mask ((__v16sf) __C, + (__v4sf) __D, + __imm, + (__v16sf) + _mm512_setzero_ps (), __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_inserti32x4 (__m512i __A, __mmask16 __B, __m512i __C, + __m128i __D, const int __imm) +{ + return (__m512i) __builtin_ia32_inserti32x4_mask ((__v16si) __C, + (__v4si) __D, + __imm, + (__v16si) __A, + __B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_insertf32x4 (__m512 __A, __mmask16 __B, __m512 __C, + __m128 __D, const int __imm) +{ + return (__m512) __builtin_ia32_insertf32x4_mask ((__v16sf) __C, + (__v4sf) __D, + __imm, + (__v16sf) __A, __B); +} +#else +#define _mm512_maskz_insertf32x4(A, X, Y, C) \ + 
((__m512) __builtin_ia32_insertf32x4_mask ((__v16sf)(__m512) (X), \ + (__v4sf)(__m128) (Y), (int) (C), (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A))) + +#define _mm512_maskz_inserti32x4(A, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti32x4_mask ((__v16si)(__m512i) (X), \ + (__v4si)(__m128i) (Y), (int) (C), (__v16si)_mm512_setzero_si512 (), \ + (__mmask16)(A))) + +#define _mm512_mask_insertf32x4(A, B, X, Y, C) \ + ((__m512) __builtin_ia32_insertf32x4_mask ((__v16sf)(__m512) (X), \ + (__v4sf)(__m128) (Y), (int) (C), (__v16sf)(__m512) (A), \ + (__mmask16)(B))) + +#define _mm512_mask_inserti32x4(A, B, X, Y, C) \ + ((__m512i) __builtin_ia32_inserti32x4_mask ((__v16si)(__m512i) (X), \ + (__v4si)(__m128i) (Y), (int) (C), (__v16si)(__m512i) (A), \ + (__mmask16)(B))) +#endif + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epu64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epu64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_undefined_epi32 (), + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +extern __inline 
__m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_epu32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_epu32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_unpacklo_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_unpcklps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + 
(__mmask16) -1); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_unpacklo_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_unpcklps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_unpcklps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) 
__builtin_ia32_maxss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_round_ss (__mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_round_sd (__m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_round_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_round_sd (__mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_round_ss (__m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_minss_round ((__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_round_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_round_ss (__mmask8 
__U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, __R); +} + +#else +#define _mm_max_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_maxsd_round(A, B, C) + +#define _mm_mask_max_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_maxsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_max_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_maxsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + +#define _mm_max_round_ss(A, B, C) \ + (__m128)__builtin_ia32_maxss_round(A, B, C) + +#define _mm_mask_max_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_maxss_mask_round(A, B, W, U, C) + +#define _mm_maskz_max_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_maxss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + +#define _mm_min_round_sd(A, B, C) \ + (__m128d)__builtin_ia32_minsd_round(A, B, C) + +#define _mm_mask_min_round_sd(W, U, A, B, C) \ + (__m128d)__builtin_ia32_minsd_mask_round(A, B, W, U, C) + +#define _mm_maskz_min_round_sd(U, A, B, C) \ + (__m128d)__builtin_ia32_minsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U, C) + +#define _mm_min_round_ss(A, B, C) \ + (__m128)__builtin_ia32_minss_round(A, B, C) + +#define _mm_mask_min_round_ss(W, U, A, B, C) \ + (__m128)__builtin_ia32_minss_mask_round(A, B, W, U, C) + +#define _mm_maskz_min_round_ss(U, A, B, C) \ + (__m128)__builtin_ia32_minss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U, C) + +#endif + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_blend_pd (__mmask8 __U, __m512d __A, __m512d __W) +{ + return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_blend_ps (__mmask16 __U, __m512 __A, __m512 __W) +{ + return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A, + (__v16sf) 
__W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_blend_epi64 (__mmask8 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_blend_epi32 (__mmask16 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W, + (__v2df) __A, + -(__v2df) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W, + (__v4sf) __A, + -(__v4sf) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + __R); +} + 
+extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_round ((__v2df) __W, + -(__v2df) __A, + -(__v2df) __B, + __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_round ((__v4sf) __W, + -(__v4sf) __A, + -(__v4sf) __B, + __R); +} +#else +#define _mm_fmadd_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_round(A, B, C, R) + +#define _mm_fmadd_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_round(A, B, C, R) + +#define _mm_fmsub_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_round(A, B, -(C), R) + +#define _mm_fmsub_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_round(A, B, -(C), R) + +#define _mm_fnmadd_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_round(A, -(B), C, R) + +#define _mm_fnmadd_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_round(A, -(B), C, R) + +#define _mm_fnmsub_round_sd(A, B, C, R) \ + (__m128d)__builtin_ia32_vfmaddsd3_round(A, -(B), -(C), R) + +#define _mm_fnmsub_round_ss(A, B, C, R) \ + (__m128)__builtin_ia32_vfmaddss3_round(A, -(B), -(C), R) +#endif + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline 
__m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + (__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_ss 
(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + (__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + (__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + (__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) 
__builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + -(__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + -(__v4sf) __A, + -(__v4sf) __B, + 
(__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + -(__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + -(__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline 
__m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + (__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + (__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask3_fmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W, + (__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, + const int __R) +{ + return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W, + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + (__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + (__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, + const int 
__R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + -(__v2df) __A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_round_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_mask ((__v2df) __W, + -(__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_round_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W, + -(__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_round_sd (__m128d __W, __m128d __A, __m128d __B, __mmask8 __U, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W, + -(__v2df) 
__A, + (__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_round_ss (__m128 __W, __m128 __A, __m128 __B, __mmask8 __U, + const int __R) +{ + return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W, + -(__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_round_sd (__mmask8 __U, __m128d __W, __m128d __A, __m128d __B, + const int __R) +{ + return (__m128d) __builtin_ia32_vfmaddsd3_maskz ((__v2df) __W, + -(__v2df) __A, + -(__v2df) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_round_ss (__mmask8 __U, __m128 __W, __m128 __A, __m128 __B, + const int __R) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __W, + -(__v4sf) __A, + -(__v4sf) __B, + (__mmask8) __U, __R); +} +#else +#define _mm_mask_fmadd_round_sd(A, U, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_mask (A, B, C, U, R) + +#define _mm_mask_fmadd_round_ss(A, U, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_mask (A, B, C, U, R) + +#define _mm_mask3_fmadd_round_sd(A, B, C, U, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_mask3 (A, B, C, U, R) + +#define _mm_mask3_fmadd_round_ss(A, B, C, U, R) \ + (__m128) __builtin_ia32_vfmaddss3_mask3 (A, B, C, U, R) + +#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, B, C, U, R) + +#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_maskz (A, B, C, U, R) + +#define _mm_mask_fmsub_round_sd(A, U, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_mask (A, B, -(C), U, R) + +#define _mm_mask_fmsub_round_ss(A, U, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_mask (A, B, -(C), U, R) + +#define _mm_mask3_fmsub_round_sd(A, B, C, U, R) \ + (__m128d) __builtin_ia32_vfmsubsd3_mask3 
(A, B, C, U, R) + +#define _mm_mask3_fmsub_round_ss(A, B, C, U, R) \ + (__m128) __builtin_ia32_vfmsubss3_mask3 (A, B, C, U, R) + +#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, B, -(C), U, R) + +#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_maskz (A, B, -(C), U, R) + +#define _mm_mask_fnmadd_round_sd(A, U, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_mask (A, -(B), C, U, R) + +#define _mm_mask_fnmadd_round_ss(A, U, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_mask (A, -(B), C, U, R) + +#define _mm_mask3_fnmadd_round_sd(A, B, C, U, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_mask3 (A, -(B), C, U, R) + +#define _mm_mask3_fnmadd_round_ss(A, B, C, U, R) \ + (__m128) __builtin_ia32_vfmaddss3_mask3 (A, -(B), C, U, R) + +#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, -(B), C, U, R) + +#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_maskz (A, -(B), C, U, R) + +#define _mm_mask_fnmsub_round_sd(A, U, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_mask (A, -(B), -(C), U, R) + +#define _mm_mask_fnmsub_round_ss(A, U, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_mask (A, -(B), -(C), U, R) + +#define _mm_mask3_fnmsub_round_sd(A, B, C, U, R) \ + (__m128d) __builtin_ia32_vfmsubsd3_mask3 (A, -(B), C, U, R) + +#define _mm_mask3_fnmsub_round_ss(A, B, C, U, R) \ + (__m128) __builtin_ia32_vfmsubss3_mask3 (A, -(B), C, U, R) + +#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \ + (__m128d) __builtin_ia32_vfmaddsd3_maskz (A, -(B), -(C), U, R) + +#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \ + (__m128) __builtin_ia32_vfmaddss3_maskz (A, -(B), -(C), U, R) +#endif + +#ifdef __OPTIMIZE__ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comi_round_ss (__m128 __A, __m128 __B, const int __P, const int __R) +{ + return __builtin_ia32_vcomiss ((__v4sf) __A, (__v4sf) 
__B, __P, __R); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comi_round_sd (__m128d __A, __m128d __B, const int __P, const int __R) +{ + return __builtin_ia32_vcomisd ((__v2df) __A, (__v2df) __B, __P, __R); +} +#else +#define _mm_comi_round_ss(A, B, C, D)\ +__builtin_ia32_vcomiss(A, B, C, D) +#define _mm_comi_round_sd(A, B, C, D)\ +__builtin_ia32_vcomisd(A, B, C, D) +#endif + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sqrt_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sqrt_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_sqrtps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sqrt_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_sqrtps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sqrt_ps (__mmask16 
__U, __m512 __A) +{ + return (__m512) __builtin_ia32_sqrtps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_pd (__m512d __A, __m512d __B) +{ + return (__m512d) ((__v8df)__A + (__v8df)__B); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_ps (__m512 __A, __m512 __B) +{ + return (__m512) ((__v16sf)__A + (__v16sf)__B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_sd (__m128d __W, __mmask8 __U, __m128d __A, 
__m128d __B) +{ + return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_addsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_pd (__m512d __A, __m512d __B) +{ + return (__m512d) ((__v8df)__A - (__v8df)__B); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_ps (__m512 __A, __m512 __B) +{ + return (__m512) ((__v16sf)__A - (__v16sf)__B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_subsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + 
_mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_pd (__m512d __A, __m512d __B) +{ + return (__m512d) ((__v8df)__A * (__v8df)__B); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_ps (__m512 __A, __m512 __B) +{ + return (__m512) ((__v16sf)__A * (__v16sf)__B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) +{ + return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A, + (__v2df) __B, + 
(__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_mulsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B) +{ + return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_div_pd (__m512d __M, __m512d __V) +{ + return (__m512d) ((__v8df)__M / (__v8df)__V); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_div_pd (__m512d __W, __mmask8 __U, __m512d __M, __m512d __V) +{ + return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __M, + (__v8df) __V, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_div_pd (__mmask8 __U, __m512d __M, __m512d __V) +{ + return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __M, + (__v8df) __V, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_div_ps (__m512 __A, __m512 __B) +{ + 
return (__m512) ((__v16sf)__A / (__v16sf)__B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_div_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_div_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_sd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) +{ + return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_divsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_ss (__m128 __W, __mmask8 __U, __m128 __A, + __m128 __B) +{ + return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_sd (__m128d __W, 
__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_maxsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A, + 
(__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_minsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); 
+} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_scalef_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_scalef_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_pd (__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_pd (__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_pd (__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_pd (__mmask8 __U, __m512d __A, __m512d __B, 
__m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_ps (__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_ps (__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_ps (__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_ps (__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsub_pd (__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsub_pd (__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask ((__v8df) __A, + (__v8df) __B, 
+ (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsub_pd (__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsub_pd (__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsub_ps (__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsub_ps (__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsub_ps (__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsub_ps (__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + 
+extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmaddsub_pd (__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmaddsub_pd (__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmaddsub_pd (__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmaddsub_pd (__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmaddsub_ps (__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmaddsub_ps (__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask3_fmaddsub_ps (__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmaddsub_ps (__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsubadd_pd (__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsubadd_pd (__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsubadd_pd (__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsubadd_pd (__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_fmsubadd_ps (__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsubadd_ps (__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsubadd_ps (__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsubadd_ps (__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmadd_pd (__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmadd_pd (__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmadd_pd (__m512d __A, __m512d 
__B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmadd_pd (__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfnmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmadd_ps (__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmadd_ps (__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmadd_ps (__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfnmaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmadd_ps (__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfnmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmsub_pd (__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) 
__builtin_ia32_vfnmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmsub_pd (__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmsub_pd (__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmsub_pd (__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfnmsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmsub_ps (__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmsub_ps (__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmsub_ps (__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + 
(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmsub_ps (__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfnmsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttpd_epi32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpd_epi32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); 
+} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttps_epi32 (__m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttps_epu32 (__m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtps_epi32 (__m512 __A) +{ + return (__m512i) 
__builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtps_epu32 (__m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsd_f64 (__m512d __A) +{ + return __A[0]; +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtss_f32 (__m512 __A) +{ + return __A[0]; +} + +#ifdef __x86_64__ +extern __inline __m128 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtu64_ss (__m128 __A, unsigned long long __B) +{ + return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtu64_sd (__m128d __A, unsigned long long __B) +{ + return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtu32_ss (__m128 __A, unsigned __B) +{ + return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi32_ps (__m512i __A) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu32_ps (__m512i __A) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fixupimm_pd (__m512d __A, __m512d __B, __m512i __C, const int __imm) +{ + return (__m512d) __builtin_ia32_fixupimmpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8di) __C, + __imm, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fixupimm_pd (__m512d __A, __mmask8 __U, __m512d __B, + __m512i __C, const int __imm) +{ + return (__m512d) __builtin_ia32_fixupimmpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8di) __C, + __imm, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fixupimm_pd (__mmask8 __U, __m512d __A, __m512d __B, + __m512i __C, const int __imm) +{ + return (__m512d) __builtin_ia32_fixupimmpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8di) __C, + __imm, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fixupimm_ps (__m512 __A, __m512 __B, __m512i __C, const int __imm) +{ + return (__m512) __builtin_ia32_fixupimmps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16si) __C, + __imm, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_mask_fixupimm_ps (__m512 __A, __mmask16 __U, __m512 __B, + __m512i __C, const int __imm) +{ + return (__m512) __builtin_ia32_fixupimmps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16si) __C, + __imm, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fixupimm_ps (__mmask16 __U, __m512 __A, __m512 __B, + __m512i __C, const int __imm) +{ + return (__m512) __builtin_ia32_fixupimmps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16si) __C, + __imm, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fixupimm_sd (__m128d __A, __m128d __B, __m128i __C, const int __imm) +{ + return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, __imm, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fixupimm_sd (__m128d __A, __mmask8 __U, __m128d __B, + __m128i __C, const int __imm) +{ + return (__m128d) __builtin_ia32_fixupimmsd_mask ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, __imm, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fixupimm_sd (__mmask8 __U, __m128d __A, __m128d __B, + __m128i __C, const int __imm) +{ + return (__m128d) __builtin_ia32_fixupimmsd_maskz ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, + __imm, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fixupimm_ss (__m128 __A, __m128 __B, __m128i __C, const int __imm) +{ + return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, __imm, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern 
__inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fixupimm_ss (__m128 __A, __mmask8 __U, __m128 __B, + __m128i __C, const int __imm) +{ + return (__m128) __builtin_ia32_fixupimmss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, __imm, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fixupimm_ss (__mmask8 __U, __m128 __A, __m128 __B, + __m128i __C, const int __imm) +{ + return (__m128) __builtin_ia32_fixupimmss_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, __imm, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} +#else +#define _mm512_fixupimm_pd(X, Y, Z, C) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), (int)(C), \ + (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_fixupimm_pd(X, U, Y, Z, C) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), (int)(C), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_fixupimm_pd(U, X, Y, Z, C) \ + ((__m512d)__builtin_ia32_fixupimmpd512_maskz ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (__v8di)(__m512i)(Z), (int)(C), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_fixupimm_ps(X, Y, Z, C) \ + ((__m512)__builtin_ia32_fixupimmps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \ + (__mmask16)(-1), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_fixupimm_ps(X, U, Y, Z, C) \ + ((__m512)__builtin_ia32_fixupimmps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \ + (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_fixupimm_ps(U, X, Y, Z, C) \ + ((__m512)__builtin_ia32_fixupimmps512_maskz ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (__v16si)(__m512i)(Z), (int)(C), \ + 
(__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_fixupimm_sd(X, Y, Z, C) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_fixupimm_sd(X, U, Y, Z, C) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_fixupimm_sd(U, X, Y, Z, C) \ + ((__m128d)__builtin_ia32_fixupimmsd_maskz ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_fixupimm_ss(X, Y, Z, C) \ + ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_fixupimm_ss(X, U, Y, Z, C) \ + ((__m128)__builtin_ia32_fixupimmss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_fixupimm_ss(U, X, Y, Z, C) \ + ((__m128)__builtin_ia32_fixupimmss_maskz ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) +#endif + +#ifdef __x86_64__ +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_i64 (__m128 __A) +{ + return 
(long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif /* __x86_64__ */ + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsi512_si32 (__m512i __A) +{ + __v16si __B = (__v16si) __A; + return __B[0]; +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_i32 (__m128 __A) +{ + return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_i32 (__m128d __A) +{ + return (int) __builtin_ia32_cvtsd2si ((__v2df) __A); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_i32 (__m128 __A) +{ + return (int) __builtin_ia32_cvtss2si ((__v4sf) __A); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvti32_sd (__m128d __A, int __B) +{ + return (__m128d) __builtin_ia32_cvtsi2sd ((__v2df) __A, __B); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvti32_ss (__m128 __A, int __B) +{ + return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B); +} + +#ifdef __x86_64__ +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern 
__inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_i64 (__m128d __A) +{ + return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_i64 (__m128d __A) +{ + return (long long) __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_i64 (__m128 __A) +{ + return (long long) __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvti64_sd (__m128d __A, long long __B) +{ + return (__m128d) __builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvti64_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} +#endif /* __x86_64__ */ + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_i32 (__m128d __A) +{ + return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtps_pd (__m256 __A) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A) +{ + return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_ps (__m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpd_ps (__m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) + _mm256_undefined_ps (), + (__mmask8) -1, + 
_MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getexp_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getexp_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) 
__builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round ((__v4sf) __A, + (__v4sf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round ((__v2df) __A, + (__v2df) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getmant_pd (__m512d __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512d) __builtin_ia32_getmantpd512_mask ((__v8df) __A, + (__C << 2) | __B, + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getmant_pd (__m512d __W, __mmask8 __U, __m512d __A, + _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512d) __builtin_ia32_getmantpd512_mask ((__v8df) __A, + (__C << 2) | __B, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getmant_pd (__mmask8 __U, __m512d __A, + _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512d) __builtin_ia32_getmantpd512_mask ((__v8df) __A, + (__C << 2) | __B, + (__v8df) + _mm512_setzero_pd (), + __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getmant_ps (__m512 __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512) __builtin_ia32_getmantps512_mask ((__v16sf) __A, + (__C << 2) | __B, + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getmant_ps (__m512 __W, __mmask16 __U, __m512 __A, + _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512) __builtin_ia32_getmantps512_mask ((__v16sf) __A, + (__C << 2) | __B, + (__v16sf) __W, __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getmant_ps (__mmask16 __U, __m512 __A, + _MM_MANTISSA_NORM_ENUM __B, _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512) __builtin_ia32_getmantps512_mask ((__v16sf) __A, + (__C << 2) | __B, + (__v16sf) + _mm512_setzero_ps (), + __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_sd (__m128d __A, __m128d __B, _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128d) __builtin_ia32_getmantsd_round ((__v2df) __A, + (__v2df) __B, + (__D << 2) | __C, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B, + _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__D << 2) | __C, + (__v2df) __W, + __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_sd (__mmask8 __U, __m128d __A, __m128d __B, + _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128d) __builtin_ia32_getmantsd_mask_round ((__v2df) __A, + (__v2df) __B, + (__D << 2) | __C, + (__v2df) + _mm_setzero_pd(), + __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_ss (__m128 __A, __m128 __B, _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128) __builtin_ia32_getmantss_round ((__v4sf) __A, + (__v4sf) __B, + (__D << 2) | __C, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_ss (__m128 __W, __mmask8 __U, 
__m128 __A, __m128 __B, + _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__D << 2) | __C, + (__v4sf) __W, + __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_ss (__mmask8 __U, __m128 __A, __m128 __B, + _MM_MANTISSA_NORM_ENUM __C, _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128) __builtin_ia32_getmantss_mask_round ((__v4sf) __A, + (__v4sf) __B, + (__D << 2) | __C, + (__v4sf) + _mm_setzero_ps(), + __U, + _MM_FROUND_CUR_DIRECTION); +} + +#else +#define _mm512_getmant_pd(X, B, C) \ + ((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1,\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getmant_pd(W, U, X, B, C) \ + ((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getmant_pd(U, X, B, C) \ + ((__m512d)__builtin_ia32_getmantpd512_mask ((__v8df)(__m512d)(X), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U),\ + _MM_FROUND_CUR_DIRECTION)) +#define _mm512_getmant_ps(X, B, C) \ + ((__m512)__builtin_ia32_getmantps512_mask ((__v16sf)(__m512)(X), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1,\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getmant_ps(W, U, X, B, C) \ + ((__m512)__builtin_ia32_getmantps512_mask ((__v16sf)(__m512)(X), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getmant_ps(U, X, B, C) \ + ((__m512)__builtin_ia32_getmantps512_mask ((__v16sf)(__m512)(X), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U),\ + _MM_FROUND_CUR_DIRECTION)) +#define 
_mm_getmant_sd(X, Y, C, D) \ + ((__m128d)__builtin_ia32_getmantsd_round ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(((D)<<2) | (C)), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_sd(W, U, X, Y, C, D) \ + ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_getmant_sd(U, X, Y, C, D) \ + ((__m128d)__builtin_ia32_getmantsd_mask_round ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_getmant_ss(X, Y, C, D) \ + ((__m128)__builtin_ia32_getmantss_round ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(((D)<<2) | (C)), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_ss(W, U, X, Y, C, D) \ + ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_getmant_ss(U, X, Y, C, D) \ + ((__m128)__builtin_ia32_getmantss_mask_round ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_getexp_ss(A, B) \ + ((__m128)__builtin_ia32_getexpss128_round((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getexp_ss(W, U, A, B) \ + (__m128)__builtin_ia32_getexpss_mask_round(A, B, W, U,\ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_getexp_ss(U, A, B) \ + (__m128)__builtin_ia32_getexpss_mask_round(A, B, (__v4sf)_mm_setzero_ps(), U,\ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_getexp_sd(A, B) \ + ((__m128d)__builtin_ia32_getexpsd128_round((__v2df)(__m128d)(A), (__v2df)(__m128d)(B),\ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getexp_sd(W, U, A, B) \ + 
(__m128d)__builtin_ia32_getexpsd_mask_round(A, B, W, U,\ + _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_getexp_sd(U, A, B) \ + (__m128d)__builtin_ia32_getexpsd_mask_round(A, B, (__v2df)_mm_setzero_pd(), U,\ + _MM_FROUND_CUR_DIRECTION) + +#define _mm512_getexp_ps(A) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), (__mmask16)-1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getexp_ps(W, U, A) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(W), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getexp_ps(U, A) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_getexp_pd(A) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getexp_pd(W, U, A) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getexp_pd(U, A) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_roundscale_ps (__m512 __A, const int __imm) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, __imm, + (__v16sf) + _mm512_undefined_ps (), + -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_roundscale_ps (__m512 __A, __mmask16 __B, __m512 __C, + const int __imm) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __C, __imm, + (__v16sf) __A, + (__mmask16) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_roundscale_ps (__mmask16 __A, __m512 __B, const int __imm) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __B, + __imm, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_roundscale_pd (__m512d __A, const int __imm) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, __imm, + (__v8df) + _mm512_undefined_pd (), + -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_roundscale_pd (__m512d __A, __mmask8 __B, __m512d __C, + const int __imm) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __C, __imm, + (__v8df) __A, + (__mmask8) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_roundscale_pd (__mmask8 __A, __m512d __B, const int __imm) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __B, + __imm, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_ss (__m128 __A, __m128 __B, const int __imm) +{ + return (__m128) + __builtin_ia32_rndscaless_mask_round ((__v4sf) __A, + (__v4sf) __B, __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_ss (__m128 __A, __mmask8 __B, __m128 __C, __m128 __D, + const int __imm) +{ + return (__m128) + __builtin_ia32_rndscaless_mask_round ((__v4sf) __C, + (__v4sf) __D, __imm, + (__v4sf) __A, + (__mmask8) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_ss (__mmask8 __A, __m128 __B, __m128 __C, + const int __imm) +{ + return (__m128) + __builtin_ia32_rndscaless_mask_round ((__v4sf) __B, + (__v4sf) __C, __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_sd (__m128d __A, __m128d __B, const int __imm) +{ + return (__m128d) + __builtin_ia32_rndscalesd_mask_round ((__v2df) __A, + (__v2df) __B, __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_sd (__m128d __A, __mmask8 __B, __m128d __C, __m128d __D, + const int __imm) +{ + return (__m128d) + __builtin_ia32_rndscalesd_mask_round ((__v2df) __C, + (__v2df) __D, __imm, + (__v2df) __A, + (__mmask8) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_sd (__mmask8 __A, __m128d __B, __m128d __C, + const int __imm) +{ + return (__m128d) + __builtin_ia32_rndscalesd_mask_round ((__v2df) __B, + (__v2df) __C, __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#else +#define _mm512_roundscale_ps(A, B) \ + ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(A), (int)(B),\ + (__v16sf)_mm512_undefined_ps(), (__mmask16)(-1), _MM_FROUND_CUR_DIRECTION)) +#define _mm512_mask_roundscale_ps(A, B, C, D) \ + ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(C), \ + (int)(D), \ + (__v16sf)(__m512)(A), \ + (__mmask16)(B), _MM_FROUND_CUR_DIRECTION)) +#define _mm512_maskz_roundscale_ps(A, B, C) \ + ((__m512) __builtin_ia32_rndscaleps_mask ((__v16sf)(__m512)(B), \ + (int)(C), \ + (__v16sf)_mm512_setzero_ps(),\ + (__mmask16)(A), _MM_FROUND_CUR_DIRECTION)) +#define 
_mm512_roundscale_pd(A, B) \ + ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(A), (int)(B),\ + (__v8df)_mm512_undefined_pd(), (__mmask8)(-1), _MM_FROUND_CUR_DIRECTION)) +#define _mm512_mask_roundscale_pd(A, B, C, D) \ + ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(C), \ + (int)(D), \ + (__v8df)(__m512d)(A), \ + (__mmask8)(B), _MM_FROUND_CUR_DIRECTION)) +#define _mm512_maskz_roundscale_pd(A, B, C) \ + ((__m512d) __builtin_ia32_rndscalepd_mask ((__v8df)(__m512d)(B), \ + (int)(C), \ + (__v8df)_mm512_setzero_pd(),\ + (__mmask8)(A), _MM_FROUND_CUR_DIRECTION)) +#define _mm_roundscale_ss(A, B, I) \ + ((__m128) \ + __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \ + (__v4sf) (__m128) (B), \ + (int) (I), \ + (__v4sf) _mm_setzero_ps (), \ + (__mmask8) (-1), \ + _MM_FROUND_CUR_DIRECTION)) +#define _mm_mask_roundscale_ss(A, U, B, C, I) \ + ((__m128) \ + __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (B), \ + (__v4sf) (__m128) (C), \ + (int) (I), \ + (__v4sf) (__m128) (A), \ + (__mmask8) (U), \ + _MM_FROUND_CUR_DIRECTION)) +#define _mm_maskz_roundscale_ss(U, A, B, I) \ + ((__m128) \ + __builtin_ia32_rndscaless_mask_round ((__v4sf) (__m128) (A), \ + (__v4sf) (__m128) (B), \ + (int) (I), \ + (__v4sf) _mm_setzero_ps (), \ + (__mmask8) (U), \ + _MM_FROUND_CUR_DIRECTION)) +#define _mm_roundscale_sd(A, B, I) \ + ((__m128d) \ + __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \ + (__v2df) (__m128d) (B), \ + (int) (I), \ + (__v2df) _mm_setzero_pd (), \ + (__mmask8) (-1), \ + _MM_FROUND_CUR_DIRECTION)) +#define _mm_mask_roundscale_sd(A, U, B, C, I) \ + ((__m128d) \ + __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (B), \ + (__v2df) (__m128d) (C), \ + (int) (I), \ + (__v2df) (__m128d) (A), \ + (__mmask8) (U), \ + _MM_FROUND_CUR_DIRECTION)) +#define _mm_maskz_roundscale_sd(U, A, B, I) \ + ((__m128d) \ + __builtin_ia32_rndscalesd_mask_round ((__v2df) (__m128d) (A), \ + (__v2df) (__m128d) (B), \ + (int) (I), \ + 
(__v2df) _mm_setzero_pd (), \ + (__mmask8) (U), \ + _MM_FROUND_CUR_DIRECTION)) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_pd_mask (__m512d __X, __m512d __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, __P, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_ps_mask (__m512 __X, __m512 __Y, const int __P) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, __P, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y, const int __P) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, __P, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, __P, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_sd_mask (__m128d __X, __m128d __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X, + (__v2df) __Y, __P, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_sd_mask (__mmask8 __M, __m128d __X, __m128d __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpsd_mask ((__v2df) __X, + (__v2df) __Y, __P, + (__mmask8) __M, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_cmp_ss_mask (__m128 __X, __m128 __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X, + (__v4sf) __Y, __P, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpss_mask ((__v4sf) __X, + (__v4sf) __Y, __P, + (__mmask8) __M, + _MM_FROUND_CUR_DIRECTION); +} + +#else +#define _mm512_cmp_pd_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(P),\ + (__mmask8)-1,_MM_FROUND_CUR_DIRECTION)) + +#define _mm512_cmp_ps_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(P),\ + (__mmask16)-1,_MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_cmp_pd_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X), \ + (__v8df)(__m512d)(Y), (int)(P),\ + (__mmask8)(M), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_cmp_ps_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X), \ + (__v16sf)(__m512)(Y), (int)(P),\ + (__mmask16)(M),_MM_FROUND_CUR_DIRECTION)) + +#define _mm_cmp_sd_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P),\ + (__mmask8)-1,_MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_cmp_sd_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P),\ + M,_MM_FROUND_CUR_DIRECTION)) + +#define _mm_cmp_ss_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1,_MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_cmp_ss_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpss_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + 
M,_MM_FROUND_CUR_DIRECTION)) +#endif + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_EQ_OQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_EQ_OQ, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_LT_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_LT_OS, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_LE_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_LE_OS, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpunord_pd_mask (__m512d __X, __m512d __Y) +{ + return 
(__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_UNORD_Q, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpunord_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_UNORD_Q, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_NEQ_UQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_NEQ_UQ, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpnlt_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_NLT_US, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpnlt_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_NLT_US, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpnle_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_NLE_US, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpnle_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_NLE_US, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpord_pd_mask (__m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_ORD_Q, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpord_pd_mask (__mmask8 __U, __m512d __X, __m512d __Y) +{ + return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, + (__v8df) __Y, _CMP_ORD_Q, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_EQ_OQ, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_EQ_OQ, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmplt_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_LT_OS, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmplt_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) 
__X, + (__v16sf) __Y, _CMP_LT_OS, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmple_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_LE_OS, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmple_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_LE_OS, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpunord_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_UNORD_Q, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpunord_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_UNORD_Q, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpneq_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_NEQ_UQ, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpneq_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_NEQ_UQ, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_cmpnlt_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_NLT_US, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpnlt_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_NLT_US, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpnle_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_NLE_US, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpnle_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_NLE_US, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpord_ps_mask (__m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_ORD_Q, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpord_ps_mask (__mmask16 __U, __m512 __X, __m512 __Y) +{ + return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) __X, + (__v16sf) __Y, _CMP_ORD_Q, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_kmov (__mmask16 __A) +{ + return __builtin_ia32_kmovw (__A); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_castpd_ps (__m512d __A) +{ + return (__m512) (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castpd_si512 (__m512d __A) +{ + return (__m512i) (__A); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps_pd (__m512 __A) +{ + return (__m512d) (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps_si512 (__m512 __A) +{ + return (__m512i) (__A); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi512_ps (__m512i __A) +{ + return (__m512) (__A); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi512_pd (__m512i __A) +{ + return (__m512d) (__A); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castpd512_pd128 (__m512d __A) +{ + return (__m128d)_mm512_extractf32x4_ps((__m512)__A, 0); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps512_ps128 (__m512 __A) +{ + return _mm512_extractf32x4_ps(__A, 0); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi512_si128 (__m512i __A) +{ + return (__m128i)_mm512_extracti32x4_epi32((__m512i)__A, 0); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castpd512_pd256 (__m512d __A) +{ + return _mm512_extractf64x4_pd(__A, 0); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps512_ps256 (__m512 __A) +{ + return (__m256)_mm512_extractf64x4_pd((__m512d)__A, 0); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi512_si256 (__m512i __A) +{ 
+ return (__m256i)_mm512_extractf64x4_pd((__m512d)__A, 0); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castpd128_pd512 (__m128d __A) +{ + return (__m512d) __builtin_ia32_pd512_pd((__m128d)__A); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps128_ps512 (__m128 __A) +{ + return (__m512) __builtin_ia32_ps512_ps((__m128)__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi128_si512 (__m128i __A) +{ + return (__m512i) __builtin_ia32_si512_si((__v4si)__A); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castpd256_pd512 (__m256d __A) +{ + return __builtin_ia32_pd512_256pd (__A); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps256_ps512 (__m256 __A) +{ + return __builtin_ia32_ps512_256ps (__A); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi256_si512 (__m256i __A) +{ + return (__m512i)__builtin_ia32_si512_256si ((__v8si)__A); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextpd128_pd512 (__m128d __A) +{ + return (__m512d) _mm512_insertf32x4 (_mm512_setzero_ps (), (__m128) __A, 0); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextps128_ps512 (__m128 __A) +{ + return _mm512_insertf32x4 (_mm512_setzero_ps (), __A, 0); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextsi128_si512 (__m128i __A) +{ + return _mm512_inserti32x4 (_mm512_setzero_si512 (), __A, 0); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextpd256_pd512 (__m256d __A) +{ + return _mm512_insertf64x4 
(_mm512_setzero_pd (), __A, 0); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextps256_ps512 (__m256 __A) +{ + return (__m512) _mm512_insertf64x4 (_mm512_setzero_pd (), (__m256d) __A, 0); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextsi256_si512 (__m256i __A) +{ + return _mm512_inserti64x4 (_mm512_setzero_si512 (), __A, 0); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epu32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __A, + (__v16si) __B, 0, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epu32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __A, + (__v16si) __B, 0, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpeq_epu64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __A, + (__v8di) __B, 0, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpeq_epu64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __A, + (__v8di) __B, 0, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epu32_mask (__m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) __A, + (__v16si) __B, 6, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epu32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si) 
__A, + (__v16si) __B, 6, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmpgt_epu64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __A, + (__v8di) __B, 6, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmpgt_epu64_mask (__m512i __A, __m512i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di) __A, + (__v8di) __B, 6, + (__mmask8) -1); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __v8si __T1 = (__v8si) _mm512_extracti64x4_epi64 (__A, 1); \ + __v8si __T2 = (__v8si) _mm512_extracti64x4_epi64 (__A, 0); \ + __m256i __T3 = (__m256i) (__T1 op __T2); \ + __v4si __T4 = (__v4si) _mm256_extracti128_si256 (__T3, 1); \ + __v4si __T5 = (__v4si) _mm256_extracti128_si256 (__T3, 0); \ + __v4si __T6 = __T4 op __T5; \ + __v4si __T7 = __builtin_shuffle (__T6, (__v4si) { 2, 3, 0, 1 }); \ + __v4si __T8 = __T6 op __T7; \ + return __T8[0] op __T8[1] + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_add_epi32 (__m512i __A) +{ + __MM512_REDUCE_OP (+); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_mul_epi32 (__m512i __A) +{ + __MM512_REDUCE_OP (*); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_and_epi32 (__m512i __A) +{ + __MM512_REDUCE_OP (&); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_or_epi32 (__m512i __A) +{ + __MM512_REDUCE_OP (|); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_add_epi32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_maskz_mov_epi32 (__U, __A); + __MM512_REDUCE_OP (+); +} + +extern __inline int +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_reduce_mul_epi32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi32 (_mm512_set1_epi32 (1), __U, __A); + __MM512_REDUCE_OP (*); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_and_epi32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi32 (_mm512_set1_epi32 (~0), __U, __A); + __MM512_REDUCE_OP (&); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_or_epi32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_maskz_mov_epi32 (__U, __A); + __MM512_REDUCE_OP (|); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __m256i __T1 = (__m256i) _mm512_extracti64x4_epi64 (__A, 1); \ + __m256i __T2 = (__m256i) _mm512_extracti64x4_epi64 (__A, 0); \ + __m256i __T3 = _mm256_##op (__T1, __T2); \ + __m128i __T4 = (__m128i) _mm256_extracti128_si256 (__T3, 1); \ + __m128i __T5 = (__m128i) _mm256_extracti128_si256 (__T3, 0); \ + __m128i __T6 = _mm_##op (__T4, __T5); \ + __m128i __T7 = (__m128i) __builtin_shuffle ((__v4si) __T6, \ + (__v4si) { 2, 3, 0, 1 }); \ + __m128i __T8 = _mm_##op (__T6, __T7); \ + __m128i __T9 = (__m128i) __builtin_shuffle ((__v4si) __T8, \ + (__v4si) { 1, 0, 1, 0 }); \ + __v4si __T10 = (__v4si) _mm_##op (__T8, __T9); \ + return __T10[0] + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_min_epi32 (__m512i __A) +{ + __MM512_REDUCE_OP (min_epi32); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_max_epi32 (__m512i __A) +{ + __MM512_REDUCE_OP (max_epi32); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_min_epu32 (__m512i __A) +{ + __MM512_REDUCE_OP (min_epu32); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_reduce_max_epu32 (__m512i __A) +{ + __MM512_REDUCE_OP (max_epu32); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_min_epi32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi32 (_mm512_set1_epi32 (__INT_MAX__), __U, __A); + __MM512_REDUCE_OP (min_epi32); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_max_epi32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi32 (_mm512_set1_epi32 (-__INT_MAX__ - 1), __U, __A); + __MM512_REDUCE_OP (max_epi32); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_min_epu32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi32 (_mm512_set1_epi32 (~0), __U, __A); + __MM512_REDUCE_OP (min_epu32); +} + +extern __inline unsigned int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_max_epu32 (__mmask16 __U, __m512i __A) +{ + __A = _mm512_maskz_mov_epi32 (__U, __A); + __MM512_REDUCE_OP (max_epu32); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __m256 __T1 = (__m256) _mm512_extractf64x4_pd ((__m512d) __A, 1); \ + __m256 __T2 = (__m256) _mm512_extractf64x4_pd ((__m512d) __A, 0); \ + __m256 __T3 = __T1 op __T2; \ + __m128 __T4 = _mm256_extractf128_ps (__T3, 1); \ + __m128 __T5 = _mm256_extractf128_ps (__T3, 0); \ + __m128 __T6 = __T4 op __T5; \ + __m128 __T7 = __builtin_shuffle (__T6, (__v4si) { 2, 3, 0, 1 }); \ + __m128 __T8 = __T6 op __T7; \ + return __T8[0] op __T8[1] + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_add_ps (__m512 __A) +{ + __MM512_REDUCE_OP (+); +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_mul_ps (__m512 __A) +{ + __MM512_REDUCE_OP (*); +} + +extern __inline float +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_reduce_add_ps (__mmask16 __U, __m512 __A) +{ + __A = _mm512_maskz_mov_ps (__U, __A); + __MM512_REDUCE_OP (+); +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_mul_ps (__mmask16 __U, __m512 __A) +{ + __A = _mm512_mask_mov_ps (_mm512_set1_ps (1.0f), __U, __A); + __MM512_REDUCE_OP (*); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __m256 __T1 = (__m256) _mm512_extractf64x4_pd ((__m512d) __A, 1); \ + __m256 __T2 = (__m256) _mm512_extractf64x4_pd ((__m512d) __A, 0); \ + __m256 __T3 = _mm256_##op (__T1, __T2); \ + __m128 __T4 = _mm256_extractf128_ps (__T3, 1); \ + __m128 __T5 = _mm256_extractf128_ps (__T3, 0); \ + __m128 __T6 = _mm_##op (__T4, __T5); \ + __m128 __T7 = __builtin_shuffle (__T6, (__v4si) { 2, 3, 0, 1 }); \ + __m128 __T8 = _mm_##op (__T6, __T7); \ + __m128 __T9 = __builtin_shuffle (__T8, (__v4si) { 1, 0, 1, 0 }); \ + __m128 __T10 = _mm_##op (__T8, __T9); \ + return __T10[0] + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_min_ps (__m512 __A) +{ + __MM512_REDUCE_OP (min_ps); +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_max_ps (__m512 __A) +{ + __MM512_REDUCE_OP (max_ps); +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_min_ps (__mmask16 __U, __m512 __A) +{ + __A = _mm512_mask_mov_ps (_mm512_set1_ps (__builtin_inff ()), __U, __A); + __MM512_REDUCE_OP (min_ps); +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_max_ps (__mmask16 __U, __m512 __A) +{ + __A = _mm512_mask_mov_ps (_mm512_set1_ps (-__builtin_inff ()), __U, __A); + __MM512_REDUCE_OP (max_ps); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __v4di __T1 = (__v4di) _mm512_extracti64x4_epi64 (__A, 
1); \ + __v4di __T2 = (__v4di) _mm512_extracti64x4_epi64 (__A, 0); \ + __m256i __T3 = (__m256i) (__T1 op __T2); \ + __v2di __T4 = (__v2di) _mm256_extracti128_si256 (__T3, 1); \ + __v2di __T5 = (__v2di) _mm256_extracti128_si256 (__T3, 0); \ + __v2di __T6 = __T4 op __T5; \ + return __T6[0] op __T6[1] + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_add_epi64 (__m512i __A) +{ + __MM512_REDUCE_OP (+); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_mul_epi64 (__m512i __A) +{ + __MM512_REDUCE_OP (*); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_and_epi64 (__m512i __A) +{ + __MM512_REDUCE_OP (&); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_or_epi64 (__m512i __A) +{ + __MM512_REDUCE_OP (|); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_add_epi64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_maskz_mov_epi64 (__U, __A); + __MM512_REDUCE_OP (+); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_mul_epi64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi64 (_mm512_set1_epi64 (1LL), __U, __A); + __MM512_REDUCE_OP (*); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_and_epi64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi64 (_mm512_set1_epi64 (~0LL), __U, __A); + __MM512_REDUCE_OP (&); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_or_epi64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_maskz_mov_epi64 (__U, __A); + __MM512_REDUCE_OP (|); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ 
+ __m512i __T1 = _mm512_shuffle_i64x2 (__A, __A, 0x4e); \ + __m512i __T2 = _mm512_##op (__A, __T1); \ + __m512i __T3 \ + = (__m512i) __builtin_shuffle ((__v8di) __T2, \ + (__v8di) { 2, 3, 0, 1, 6, 7, 4, 5 });\ + __m512i __T4 = _mm512_##op (__T2, __T3); \ + __m512i __T5 \ + = (__m512i) __builtin_shuffle ((__v8di) __T4, \ + (__v8di) { 1, 0, 3, 2, 5, 4, 7, 6 });\ + __v8di __T6 = (__v8di) _mm512_##op (__T4, __T5); \ + return __T6[0] + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_min_epi64 (__m512i __A) +{ + __MM512_REDUCE_OP (min_epi64); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_max_epi64 (__m512i __A) +{ + __MM512_REDUCE_OP (max_epi64); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_min_epi64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi64 (_mm512_set1_epi64 (__LONG_LONG_MAX__), + __U, __A); + __MM512_REDUCE_OP (min_epi64); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_max_epi64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi64 (_mm512_set1_epi64 (-__LONG_LONG_MAX__ - 1), + __U, __A); + __MM512_REDUCE_OP (max_epi64); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_min_epu64 (__m512i __A) +{ + __MM512_REDUCE_OP (min_epu64); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_max_epu64 (__m512i __A) +{ + __MM512_REDUCE_OP (max_epu64); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_min_epu64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_mask_mov_epi64 (_mm512_set1_epi64 (~0LL), __U, __A); + __MM512_REDUCE_OP (min_epu64); +} + +extern 
__inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_max_epu64 (__mmask8 __U, __m512i __A) +{ + __A = _mm512_maskz_mov_epi64 (__U, __A); + __MM512_REDUCE_OP (max_epu64); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __m256d __T1 = (__m256d) _mm512_extractf64x4_pd (__A, 1); \ + __m256d __T2 = (__m256d) _mm512_extractf64x4_pd (__A, 0); \ + __m256d __T3 = __T1 op __T2; \ + __m128d __T4 = _mm256_extractf128_pd (__T3, 1); \ + __m128d __T5 = _mm256_extractf128_pd (__T3, 0); \ + __m128d __T6 = __T4 op __T5; \ + return __T6[0] op __T6[1] + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_add_pd (__m512d __A) +{ + __MM512_REDUCE_OP (+); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_mul_pd (__m512d __A) +{ + __MM512_REDUCE_OP (*); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_add_pd (__mmask8 __U, __m512d __A) +{ + __A = _mm512_maskz_mov_pd (__U, __A); + __MM512_REDUCE_OP (+); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_mul_pd (__mmask8 __U, __m512d __A) +{ + __A = _mm512_mask_mov_pd (_mm512_set1_pd (1.0), __U, __A); + __MM512_REDUCE_OP (*); +} + +#undef __MM512_REDUCE_OP +#define __MM512_REDUCE_OP(op) \ + __m256d __T1 = (__m256d) _mm512_extractf64x4_pd (__A, 1); \ + __m256d __T2 = (__m256d) _mm512_extractf64x4_pd (__A, 0); \ + __m256d __T3 = _mm256_##op (__T1, __T2); \ + __m128d __T4 = _mm256_extractf128_pd (__T3, 1); \ + __m128d __T5 = _mm256_extractf128_pd (__T3, 0); \ + __m128d __T6 = _mm_##op (__T4, __T5); \ + __m128d __T7 = (__m128d) __builtin_shuffle (__T6, (__v2di) { 1, 0 }); \ + __m128d __T8 = _mm_##op (__T6, __T7); \ + return __T8[0] + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_reduce_min_pd (__m512d __A) +{ + __MM512_REDUCE_OP (min_pd); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_max_pd (__m512d __A) +{ + __MM512_REDUCE_OP (max_pd); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_min_pd (__mmask8 __U, __m512d __A) +{ + __A = _mm512_mask_mov_pd (_mm512_set1_pd (__builtin_inf ()), __U, __A); + __MM512_REDUCE_OP (min_pd); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_max_pd (__mmask8 __U, __m512d __A) +{ + __A = _mm512_mask_mov_pd (_mm512_set1_pd (-__builtin_inf ()), __U, __A); + __MM512_REDUCE_OP (max_pd); +} + +#undef __MM512_REDUCE_OP + +#ifdef __DISABLE_AVX512F__ +#undef __DISABLE_AVX512F__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512F__ */ + +#endif /* _AVX512FINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fp16intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fp16intrin.h new file mode 100644 index 0000000..2804151 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fp16intrin.h @@ -0,0 +1,7209 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512FP16INTRIN_H_INCLUDED +#define __AVX512FP16INTRIN_H_INCLUDED + +#ifndef __AVX512FP16__ +#pragma GCC push_options +#pragma GCC target("avx512fp16") +#define __DISABLE_AVX512FP16__ +#endif /* __AVX512FP16__ */ + +/* Internal data types for implementing the intrinsics. */ +typedef _Float16 __v8hf __attribute__ ((__vector_size__ (16))); +typedef _Float16 __v16hf __attribute__ ((__vector_size__ (32))); +typedef _Float16 __v32hf __attribute__ ((__vector_size__ (64))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef _Float16 __m128h __attribute__ ((__vector_size__ (16), __may_alias__)); +typedef _Float16 __m256h __attribute__ ((__vector_size__ (32), __may_alias__)); +typedef _Float16 __m512h __attribute__ ((__vector_size__ (64), __may_alias__)); + +/* Unaligned version of the same type. 
*/ +typedef _Float16 __m128h_u __attribute__ ((__vector_size__ (16), \ + __may_alias__, __aligned__ (1))); +typedef _Float16 __m256h_u __attribute__ ((__vector_size__ (32), \ + __may_alias__, __aligned__ (1))); +typedef _Float16 __m512h_u __attribute__ ((__vector_size__ (64), \ + __may_alias__, __aligned__ (1))); + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ph (_Float16 __A7, _Float16 __A6, _Float16 __A5, + _Float16 __A4, _Float16 __A3, _Float16 __A2, + _Float16 __A1, _Float16 __A0) +{ + return __extension__ (__m128h)(__v8hf){ __A0, __A1, __A2, __A3, + __A4, __A5, __A6, __A7 }; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_ph (_Float16 __A15, _Float16 __A14, _Float16 __A13, + _Float16 __A12, _Float16 __A11, _Float16 __A10, + _Float16 __A9, _Float16 __A8, _Float16 __A7, + _Float16 __A6, _Float16 __A5, _Float16 __A4, + _Float16 __A3, _Float16 __A2, _Float16 __A1, + _Float16 __A0) +{ + return __extension__ (__m256h)(__v16hf){ __A0, __A1, __A2, __A3, + __A4, __A5, __A6, __A7, + __A8, __A9, __A10, __A11, + __A12, __A13, __A14, __A15 }; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set_ph (_Float16 __A31, _Float16 __A30, _Float16 __A29, + _Float16 __A28, _Float16 __A27, _Float16 __A26, + _Float16 __A25, _Float16 __A24, _Float16 __A23, + _Float16 __A22, _Float16 __A21, _Float16 __A20, + _Float16 __A19, _Float16 __A18, _Float16 __A17, + _Float16 __A16, _Float16 __A15, _Float16 __A14, + _Float16 __A13, _Float16 __A12, _Float16 __A11, + _Float16 __A10, _Float16 __A9, _Float16 __A8, + _Float16 __A7, _Float16 __A6, _Float16 __A5, + _Float16 __A4, _Float16 __A3, _Float16 __A2, + _Float16 __A1, _Float16 __A0) +{ + return __extension__ (__m512h)(__v32hf){ __A0, __A1, __A2, __A3, + __A4, __A5, __A6, __A7, + __A8, __A9, __A10, __A11, + __A12, __A13, __A14, __A15, + __A16, __A17, __A18, __A19, + 
__A20, __A21, __A22, __A23, + __A24, __A25, __A26, __A27, + __A28, __A29, __A30, __A31 }; +} + +/* Create vectors of elements in the reversed order from _mm_set_ph, + _mm256_set_ph and _mm512_set_ph functions. */ + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2, + _Float16 __A3, _Float16 __A4, _Float16 __A5, + _Float16 __A6, _Float16 __A7) +{ + return _mm_set_ph (__A7, __A6, __A5, __A4, __A3, __A2, __A1, __A0); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2, + _Float16 __A3, _Float16 __A4, _Float16 __A5, + _Float16 __A6, _Float16 __A7, _Float16 __A8, + _Float16 __A9, _Float16 __A10, _Float16 __A11, + _Float16 __A12, _Float16 __A13, _Float16 __A14, + _Float16 __A15) +{ + return _mm256_set_ph (__A15, __A14, __A13, __A12, __A11, __A10, __A9, + __A8, __A7, __A6, __A5, __A4, __A3, __A2, __A1, + __A0); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2, + _Float16 __A3, _Float16 __A4, _Float16 __A5, + _Float16 __A6, _Float16 __A7, _Float16 __A8, + _Float16 __A9, _Float16 __A10, _Float16 __A11, + _Float16 __A12, _Float16 __A13, _Float16 __A14, + _Float16 __A15, _Float16 __A16, _Float16 __A17, + _Float16 __A18, _Float16 __A19, _Float16 __A20, + _Float16 __A21, _Float16 __A22, _Float16 __A23, + _Float16 __A24, _Float16 __A25, _Float16 __A26, + _Float16 __A27, _Float16 __A28, _Float16 __A29, + _Float16 __A30, _Float16 __A31) + +{ + return _mm512_set_ph (__A31, __A30, __A29, __A28, __A27, __A26, __A25, + __A24, __A23, __A22, __A21, __A20, __A19, __A18, + __A17, __A16, __A15, __A14, __A13, __A12, __A11, + __A10, __A9, __A8, __A7, __A6, __A5, __A4, __A3, + __A2, __A1, __A0); +} + +/* Broadcast _Float16 to vector. 
*/ + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_ph (_Float16 __A) +{ + return _mm_set_ph (__A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_ph (_Float16 __A) +{ + return _mm256_set_ph (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_ph (_Float16 __A) +{ + return _mm512_set_ph (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +/* Create a vector with all zeros. */ + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_ph (void) +{ + return _mm_set1_ph (0.0f); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_ph (void) +{ + return _mm256_set1_ph (0.0f); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_setzero_ph (void) +{ + return _mm512_set1_ph (0.0f); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_ph (void) +{ + __m128h __Y = __Y; + return __Y; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_undefined_ph (void) +{ + __m256h __Y = __Y; + return __Y; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_undefined_ph (void) +{ + __m512h __Y = __Y; + return __Y; +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_h (__m128h __A) +{ + return __A[0]; +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_cvtsh_h (__m256h __A) +{ + return __A[0]; +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtsh_h (__m512h __A) +{ + return __A[0]; +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph_ps (__m512h __a) +{ + return (__m512) __a; +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph_pd (__m512h __a) +{ + return (__m512d) __a; +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph_si512 (__m512h __a) +{ + return (__m512i) __a; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph512_ph128 (__m512h __A) +{ + union + { + __m128h a[4]; + __m512h v; + } u = { .v = __A }; + return u.a[0]; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph512_ph256 (__m512h __A) +{ + union + { + __m256h a[2]; + __m512h v; + } u = { .v = __A }; + return u.a[0]; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph128_ph512 (__m128h __A) +{ + union + { + __m128h a[4]; + __m512h v; + } u; + u.a[0] = __A; + return u.v; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castph256_ph512 (__m256h __A) +{ + union + { + __m256h a[2]; + __m512h v; + } u; + u.a[0] = __A; + return u.v; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextph128_ph512 (__m128h __A) +{ + return (__m512h) _mm512_insertf32x4 (_mm512_setzero_ps (), + (__m128) __A, 0); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_zextph256_ph512 (__m256h __A) +{ + return (__m512h) _mm512_insertf64x4 (_mm512_setzero_pd (), + 
(__m256d) __A, 0); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castps_ph (__m512 __a) +{ + return (__m512h) __a; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castpd_ph (__m512d __a) +{ + return (__m512h) __a; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_castsi512_ph (__m512i __a) +{ + return (__m512h) __a; +} + +/* Create a vector with element 0 as F and the rest zero. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_sh (_Float16 __F) +{ + return _mm_set_ph (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, __F); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_sh (void const *__P) +{ + return _mm_set_ph (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + *(_Float16 const *) __P); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_load_ph (void const *__P) +{ + return *(const __m512h *) __P; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_ph (void const *__P) +{ + return *(const __m256h *) __P; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ph (void const *__P) +{ + return *(const __m128h *) __P; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_loadu_ph (void const *__P) +{ + return *(const __m512h_u *) __P; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_ph (void const *__P) +{ + return *(const __m256h_u *) __P; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_loadu_ph (void const *__P) +{ + return *(const __m128h_u *) __P; +} + +/* Stores the lower _Float16 value. */ +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_sh (void *__P, __m128h __A) +{ + *(_Float16 *) __P = ((__v8hf)__A)[0]; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_store_ph (void *__P, __m512h __A) +{ + *(__m512h *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_ph (void *__P, __m256h __A) +{ + *(__m256h *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ph (void *__P, __m128h __A) +{ + *(__m128h *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_storeu_ph (void *__P, __m512h __A) +{ + *(__m512h_u *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_ph (void *__P, __m256h __A) +{ + *(__m256h_u *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_ph (void *__P, __m128h __A) +{ + *(__m128h_u *) __P = __A; +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_abs_ph (__m512h __A) +{ + return (__m512h) _mm512_and_epi32 ( _mm512_set1_epi32 (0x7FFF7FFF), + (__m512i) __A); +} + +/* Intrinsics v[add,sub,mul,div]ph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_ph (__m512h __A, __m512h __B) +{ + return (__m512h) ((__v32hf) __A + (__v32hf) __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_addph512_mask (__C, __D, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_addph512_mask (__B, __C, + _mm512_setzero_ph (), __A); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_ph (__m512h __A, __m512h __B) +{ + return (__m512h) ((__v32hf) __A - (__v32hf) __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_subph512_mask (__C, __D, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_subph512_mask (__B, __C, + _mm512_setzero_ph (), __A); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_ph (__m512h __A, __m512h __B) +{ + return (__m512h) ((__v32hf) __A * (__v32hf) __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_mulph512_mask (__C, __D, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_mulph512_mask (__B, __C, 
+ _mm512_setzero_ph (), __A); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_div_ph (__m512h __A, __m512h __B) +{ + return (__m512h) ((__v32hf) __A / (__v32hf) __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_div_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_divph512_mask (__C, __D, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_div_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_divph512_mask (__B, __C, + _mm512_setzero_ph (), __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_add_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_addph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_add_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, const int __E) +{ + return __builtin_ia32_addph512_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_add_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_addph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sub_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_subph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sub_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, 
const int __E) +{ + return __builtin_ia32_subph512_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sub_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_subph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mul_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_mulph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_mul_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, const int __E) +{ + return __builtin_ia32_mulph512_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_mul_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_mulph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_div_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_divph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_div_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, const int __E) +{ + return __builtin_ia32_divph512_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_div_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_divph512_mask_round (__B, __C, + _mm512_setzero_ph (), + 
__A, __D); +} +#else +#define _mm512_add_round_ph(A, B, C) \ + ((__m512h)__builtin_ia32_addph512_mask_round((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_add_round_ph(A, B, C, D, E) \ + ((__m512h)__builtin_ia32_addph512_mask_round((C), (D), (A), (B), (E))) + +#define _mm512_maskz_add_round_ph(A, B, C, D) \ + ((__m512h)__builtin_ia32_addph512_mask_round((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#define _mm512_sub_round_ph(A, B, C) \ + ((__m512h)__builtin_ia32_subph512_mask_round((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_sub_round_ph(A, B, C, D, E) \ + ((__m512h)__builtin_ia32_subph512_mask_round((C), (D), (A), (B), (E))) + +#define _mm512_maskz_sub_round_ph(A, B, C, D) \ + ((__m512h)__builtin_ia32_subph512_mask_round((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#define _mm512_mul_round_ph(A, B, C) \ + ((__m512h)__builtin_ia32_mulph512_mask_round((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_mul_round_ph(A, B, C, D, E) \ + ((__m512h)__builtin_ia32_mulph512_mask_round((C), (D), (A), (B), (E))) + +#define _mm512_maskz_mul_round_ph(A, B, C, D) \ + ((__m512h)__builtin_ia32_mulph512_mask_round((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#define _mm512_div_round_ph(A, B, C) \ + ((__m512h)__builtin_ia32_divph512_mask_round((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_div_round_ph(A, B, C, D, E) \ + ((__m512h)__builtin_ia32_divph512_mask_round((C), (D), (A), (B), (E))) + +#define _mm512_maskz_div_round_ph(A, B, C, D) \ + ((__m512h)__builtin_ia32_divph512_mask_round((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) +#endif /* __OPTIMIZE__ */ + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_conj_pch (__m512h __A) +{ + return (__m512h) _mm512_xor_epi32 ((__m512i) __A, _mm512_set1_epi32 (1<<31)); +} + +extern __inline __m512h 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_conj_pch (__m512h __W, __mmask16 __U, __m512h __A) +{ + return (__m512h) + __builtin_ia32_movaps512_mask ((__v16sf) _mm512_conj_pch (__A), + (__v16sf) __W, + (__mmask16) __U); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_conj_pch (__mmask16 __U, __m512h __A) +{ + return (__m512h) + __builtin_ia32_movaps512_mask ((__v16sf) _mm512_conj_pch (__A), + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U); +} + +/* Intrinsics of v[add,sub,mul,div]sh. */ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_sh (__m128h __A, __m128h __B) +{ + __A[0] += __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_addsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_addsh_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_sh (__m128h __A, __m128h __B) +{ + __A[0] -= __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_subsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_subsh_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_sh (__m128h __A, 
__m128h __B) +{ + __A[0] *= __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_mulsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_mulsh_mask (__B, __C, _mm_setzero_ph (), __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_sh (__m128h __A, __m128h __B) +{ + __A[0] /= __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_divsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_divsh_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_addsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_addsh_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_addsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} + +extern __inline __m128h 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_subsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_subsh_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_subsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_mulsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_mulsh_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_mulsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_divsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int 
__E) +{ + return __builtin_ia32_divsh_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_divsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} +#else +#define _mm_add_round_sh(A, B, C) \ + ((__m128h)__builtin_ia32_addsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_add_round_sh(A, B, C, D, E) \ + ((__m128h)__builtin_ia32_addsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_add_round_sh(A, B, C, D) \ + ((__m128h)__builtin_ia32_addsh_mask_round ((B), (C), \ + _mm_setzero_ph (), \ + (A), (D))) + +#define _mm_sub_round_sh(A, B, C) \ + ((__m128h)__builtin_ia32_subsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_sub_round_sh(A, B, C, D, E) \ + ((__m128h)__builtin_ia32_subsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_sub_round_sh(A, B, C, D) \ + ((__m128h)__builtin_ia32_subsh_mask_round ((B), (C), \ + _mm_setzero_ph (), \ + (A), (D))) + +#define _mm_mul_round_sh(A, B, C) \ + ((__m128h)__builtin_ia32_mulsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_mul_round_sh(A, B, C, D, E) \ + ((__m128h)__builtin_ia32_mulsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_mul_round_sh(A, B, C, D) \ + ((__m128h)__builtin_ia32_mulsh_mask_round ((B), (C), \ + _mm_setzero_ph (), \ + (A), (D))) + +#define _mm_div_round_sh(A, B, C) \ + ((__m128h)__builtin_ia32_divsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_div_round_sh(A, B, C, D, E) \ + ((__m128h)__builtin_ia32_divsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_div_round_sh(A, B, C, D) \ + ((__m128h)__builtin_ia32_divsh_mask_round ((B), (C), \ + _mm_setzero_ph (), \ + (A), (D))) +#endif /* __OPTIMIZE__ */ + +/* 
Intrinsic vmaxph vminph. */ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_ph (__m512h __A, __m512h __B) +{ + return __builtin_ia32_maxph512_mask (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_maxph512_mask (__C, __D, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_maxph512_mask (__B, __C, + _mm512_setzero_ph (), __A); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_ph (__m512h __A, __m512h __B) +{ + return __builtin_ia32_minph512_mask (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_minph512_mask (__C, __D, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_minph512_mask (__B, __C, + _mm512_setzero_ph (), __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_max_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_maxph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_max_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, const int __E) +{ + return __builtin_ia32_maxph512_mask_round (__C, 
__D, __A, __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_max_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_maxph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_min_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_minph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_min_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, const int __E) +{ + return __builtin_ia32_minph512_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_min_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_minph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +#else +#define _mm512_max_round_ph(A, B, C) \ + (__builtin_ia32_maxph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_max_round_ph(A, B, C, D, E) \ + (__builtin_ia32_maxph512_mask_round ((C), (D), (A), (B), (E))) + +#define _mm512_maskz_max_round_ph(A, B, C, D) \ + (__builtin_ia32_maxph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#define _mm512_min_round_ph(A, B, C) \ + (__builtin_ia32_minph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_min_round_ph(A, B, C, D, E) \ + (__builtin_ia32_minph512_mask_round ((C), (D), (A), (B), (E))) + +#define _mm512_maskz_min_round_ph(A, B, C, D) \ + (__builtin_ia32_minph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) +#endif /* __OPTIMIZE__ */ + +/* Intrinsic vmaxsh vminsh. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_sh (__m128h __A, __m128h __B) +{ + __A[0] = __A[0] > __B[0] ? __A[0] : __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_maxsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_maxsh_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_sh (__m128h __A, __m128h __B) +{ + __A[0] = __A[0] < __B[0] ? __A[0] : __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_minsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_minsh_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_maxsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_maxsh_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_round_sh 
(__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_maxsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_minsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_minsh_mask_round (__C, __D, __A, __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_minsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} + +#else +#define _mm_max_round_sh(A, B, C) \ + (__builtin_ia32_maxsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_max_round_sh(A, B, C, D, E) \ + (__builtin_ia32_maxsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_max_round_sh(A, B, C, D) \ + (__builtin_ia32_maxsh_mask_round ((B), (C), \ + _mm_setzero_ph (), \ + (A), (D))) + +#define _mm_min_round_sh(A, B, C) \ + (__builtin_ia32_minsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_min_round_sh(A, B, C, D, E) \ + (__builtin_ia32_minsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_min_round_sh(A, B, C, D) \ + (__builtin_ia32_minsh_mask_round ((B), (C), \ + _mm_setzero_ph (), \ + (A), (D))) + +#endif /* __OPTIMIZE__ */ + +/* vcmpph */ +#ifdef __OPTIMIZE +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_ph_mask (__m512h __A, __m512h __B, const int __C) +{ + return (__mmask32) __builtin_ia32_cmpph512_mask (__A, __B, __C, + 
(__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_ph_mask (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return (__mmask32) __builtin_ia32_cmpph512_mask (__B, __C, __D, + __A); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cmp_round_ph_mask (__m512h __A, __m512h __B, const int __C, + const int __D) +{ + return (__mmask32) __builtin_ia32_cmpph512_mask_round (__A, __B, + __C, (__mmask32) -1, + __D); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cmp_round_ph_mask (__mmask32 __A, __m512h __B, __m512h __C, + const int __D, const int __E) +{ + return (__mmask32) __builtin_ia32_cmpph512_mask_round (__B, __C, + __D, __A, + __E); +} + +#else +#define _mm512_cmp_ph_mask(A, B, C) \ + (__builtin_ia32_cmpph512_mask ((A), (B), (C), (-1))) + +#define _mm512_mask_cmp_ph_mask(A, B, C, D) \ + (__builtin_ia32_cmpph512_mask ((B), (C), (D), (A))) + +#define _mm512_cmp_round_ph_mask(A, B, C, D) \ + (__builtin_ia32_cmpph512_mask_round ((A), (B), (C), (-1), (D))) + +#define _mm512_mask_cmp_round_ph_mask(A, B, C, D, E) \ + (__builtin_ia32_cmpph512_mask_round ((B), (C), (D), (A), (E))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcmpsh. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_sh_mask (__m128h __A, __m128h __B, const int __C) +{ + return (__mmask8) + __builtin_ia32_cmpsh_mask_round (__A, __B, + __C, (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_sh_mask (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return (__mmask8) + __builtin_ia32_cmpsh_mask_round (__B, __C, + __D, __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_round_sh_mask (__m128h __A, __m128h __B, const int __C, + const int __D) +{ + return (__mmask8) __builtin_ia32_cmpsh_mask_round (__A, __B, + __C, (__mmask8) -1, + __D); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_round_sh_mask (__mmask8 __A, __m128h __B, __m128h __C, + const int __D, const int __E) +{ + return (__mmask8) __builtin_ia32_cmpsh_mask_round (__B, __C, + __D, __A, + __E); +} + +#else +#define _mm_cmp_sh_mask(A, B, C) \ + (__builtin_ia32_cmpsh_mask_round ((A), (B), (C), (-1), \ + (_MM_FROUND_CUR_DIRECTION))) + +#define _mm_mask_cmp_sh_mask(A, B, C, D) \ + (__builtin_ia32_cmpsh_mask_round ((B), (C), (D), (A), \ + (_MM_FROUND_CUR_DIRECTION))) + +#define _mm_cmp_round_sh_mask(A, B, C, D) \ + (__builtin_ia32_cmpsh_mask_round ((A), (B), (C), (-1), (D))) + +#define _mm_mask_cmp_round_sh_mask(A, B, C, D, E) \ + (__builtin_ia32_cmpsh_mask_round ((B), (C), (D), (A), (E))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcomish. 
*/ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_EQ_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_LT_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_LE_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_GT_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_GE_OS, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_NEQ_US, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_EQ_OQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_LT_OQ, + (__mmask8) -1, + 
_MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_LE_OQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_GT_OQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_GE_OQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, _CMP_NEQ_UQ, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comi_sh (__m128h __A, __m128h __B, const int __P) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, __P, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comi_round_sh (__m128h __A, __m128h __B, const int __P, const int __R) +{ + return __builtin_ia32_cmpsh_mask_round (__A, __B, __P, + (__mmask8) -1,__R); +} + +#else +#define _mm_comi_round_sh(A, B, P, R) \ + (__builtin_ia32_cmpsh_mask_round ((A), (B), (P), (__mmask8) (-1), (R))) +#define _mm_comi_sh(A, B, P) \ + (__builtin_ia32_cmpsh_mask_round ((A), (B), (P), (__mmask8) (-1), \ + _MM_FROUND_CUR_DIRECTION)) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vsqrtph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sqrt_ph (__m512h __A) +{ + return __builtin_ia32_sqrtph512_mask_round (__A, + _mm512_setzero_ph(), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sqrt_ph (__m512h __A, __mmask32 __B, __m512h __C) +{ + return __builtin_ia32_sqrtph512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sqrt_ph (__mmask32 __A, __m512h __B) +{ + return __builtin_ia32_sqrtph512_mask_round (__B, + _mm512_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_sqrt_round_ph (__m512h __A, const int __B) +{ + return __builtin_ia32_sqrtph512_mask_round (__A, + _mm512_setzero_ph(), + (__mmask32) -1, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_sqrt_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_sqrtph512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_sqrt_round_ph (__mmask32 __A, __m512h __B, const int __C) +{ + return __builtin_ia32_sqrtph512_mask_round (__B, + _mm512_setzero_ph (), + __A, __C); +} + +#else +#define _mm512_sqrt_round_ph(A, B) \ + (__builtin_ia32_sqrtph512_mask_round ((A), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (B))) + +#define _mm512_mask_sqrt_round_ph(A, B, C, D) \ + (__builtin_ia32_sqrtph512_mask_round ((C), (A), (B), (D))) + +#define _mm512_maskz_sqrt_round_ph(A, B, C) \ + (__builtin_ia32_sqrtph512_mask_round ((B), \ + _mm512_setzero_ph (), \ + (A), (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics 
vrsqrtph. */ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rsqrt_ph (__m512h __A) +{ + return __builtin_ia32_rsqrtph512_mask (__A, _mm512_setzero_ph (), + (__mmask32) -1); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rsqrt_ph (__m512h __A, __mmask32 __B, __m512h __C) +{ + return __builtin_ia32_rsqrtph512_mask (__C, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rsqrt_ph (__mmask32 __A, __m512h __B) +{ + return __builtin_ia32_rsqrtph512_mask (__B, _mm512_setzero_ph (), + __A); +} + +/* Intrinsics vrsqrtsh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_rsqrtsh_mask (__B, __A, _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_rsqrtsh_mask (__D, __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_rsqrtsh_mask (__C, __B, _mm_setzero_ph (), + __A); +} + +/* Intrinsics vsqrtsh. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_sqrtsh_mask_round (__B, __A, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_sqrtsh_mask_round (__D, __C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_sqrtsh_mask_round (__C, __B, + _mm_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_sqrtsh_mask_round (__B, __A, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_sqrtsh_mask_round (__D, __C, __A, __B, + __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_sqrtsh_mask_round (__C, __B, + _mm_setzero_ph (), + __A, __D); +} + +#else +#define _mm_sqrt_round_sh(A, B, C) \ + (__builtin_ia32_sqrtsh_mask_round ((B), (A), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define _mm_mask_sqrt_round_sh(A, B, C, D, E) \ + (__builtin_ia32_sqrtsh_mask_round ((D), (C), (A), (B), (E))) + +#define _mm_maskz_sqrt_round_sh(A, B, C, D) \ + (__builtin_ia32_sqrtsh_mask_round ((C), (B), \ + _mm_setzero_ph (), \ + 
(A), (D))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vrcpph. */ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_rcp_ph (__m512h __A) +{ + return __builtin_ia32_rcpph512_mask (__A, _mm512_setzero_ph (), + (__mmask32) -1); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_rcp_ph (__m512h __A, __mmask32 __B, __m512h __C) +{ + return __builtin_ia32_rcpph512_mask (__C, __A, __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_rcp_ph (__mmask32 __A, __m512h __B) +{ + return __builtin_ia32_rcpph512_mask (__B, _mm512_setzero_ph (), + __A); +} + +/* Intrinsics vrcpsh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_rcpsh_mask (__B, __A, _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp_sh (__m128h __A, __mmask32 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_rcpsh_mask (__D, __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp_sh (__mmask32 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_rcpsh_mask (__C, __B, _mm_setzero_ph (), + __A); +} + +/* Intrinsics vscalefph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_scalef_ph (__m512h __A, __m512h __B) +{ + return __builtin_ia32_scalefph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_scalef_ph (__m512h __A, __mmask32 __B, __m512h __C, __m512h __D) +{ + return __builtin_ia32_scalefph512_mask_round (__C, __D, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_scalef_ph (__mmask32 __A, __m512h __B, __m512h __C) +{ + return __builtin_ia32_scalefph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_scalef_round_ph (__m512h __A, __m512h __B, const int __C) +{ + return __builtin_ia32_scalefph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_scalef_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + __m512h __D, const int __E) +{ + return __builtin_ia32_scalefph512_mask_round (__C, __D, __A, __B, + __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_scalef_round_ph (__mmask32 __A, __m512h __B, __m512h __C, + const int __D) +{ + return __builtin_ia32_scalefph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +#else +#define _mm512_scalef_round_ph(A, B, C) \ + (__builtin_ia32_scalefph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_scalef_round_ph(A, B, C, D, E) \ + (__builtin_ia32_scalefph512_mask_round ((C), (D), (A), (B), (E))) + +#define 
_mm512_maskz_scalef_round_ph(A, B, C, D) \ + (__builtin_ia32_scalefph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vscalefsh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_sh (__m128h __A, __m128h __B) +{ + return __builtin_ia32_scalefsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_scalef_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_scalefsh_mask_round (__C, __D, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_scalefsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_round_sh (__m128h __A, __m128h __B, const int __C) +{ + return __builtin_ia32_scalefsh_mask_round (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1, __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_scalef_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return __builtin_ia32_scalefsh_mask_round (__C, __D, __A, __B, + __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return __builtin_ia32_scalefsh_mask_round (__B, __C, + _mm_setzero_ph (), + __A, __D); +} + +#else +#define _mm_scalef_round_sh(A, B, C) \ + (__builtin_ia32_scalefsh_mask_round ((A), (B), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (C))) + +#define 
_mm_mask_scalef_round_sh(A, B, C, D, E) \ + (__builtin_ia32_scalefsh_mask_round ((C), (D), (A), (B), (E))) + +#define _mm_maskz_scalef_round_sh(A, B, C, D) \ + (__builtin_ia32_scalefsh_mask_round ((B), (C), _mm_setzero_ph (), \ + (A), (D))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vreduceph. */ +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_ph (__m512h __A, int __B) +{ + return __builtin_ia32_reduceph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_ph (__m512h __A, __mmask32 __B, __m512h __C, int __D) +{ + return __builtin_ia32_reduceph512_mask_round (__C, __D, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_reduce_ph (__mmask32 __A, __m512h __B, int __C) +{ + return __builtin_ia32_reduceph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_round_ph (__m512h __A, int __B, const int __C) +{ + return __builtin_ia32_reduceph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_reduce_round_ph (__m512h __A, __mmask32 __B, __m512h __C, + int __D, const int __E) +{ + return __builtin_ia32_reduceph512_mask_round (__C, __D, __A, __B, + __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_reduce_round_ph (__mmask32 __A, __m512h __B, int __C, + const int __D) +{ + return __builtin_ia32_reduceph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +#else +#define 
_mm512_reduce_ph(A, B) \ + (__builtin_ia32_reduceph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_reduce_ph(A, B, C, D) \ + (__builtin_ia32_reduceph512_mask_round ((C), (D), (A), (B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_reduce_ph(A, B, C) \ + (__builtin_ia32_reduceph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_reduce_round_ph(A, B, C) \ + (__builtin_ia32_reduceph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_reduce_round_ph(A, B, C, D, E) \ + (__builtin_ia32_reduceph512_mask_round ((C), (D), (A), (B), (E))) + +#define _mm512_maskz_reduce_round_ph(A, B, C, D) \ + (__builtin_ia32_reduceph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vreducesh. */ +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_sh (__m128h __A, __m128h __B, int __C) +{ + return __builtin_ia32_reducesh_mask_round (__A, __B, __C, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, int __E) +{ + return __builtin_ia32_reducesh_mask_round (__C, __D, __E, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_sh (__mmask8 __A, __m128h __B, __m128h __C, int __D) +{ + return __builtin_ia32_reducesh_mask_round (__B, __C, __D, + _mm_setzero_ph (), __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_round_sh (__m128h __A, __m128h __B, int __C, const int __D) +{ + return 
__builtin_ia32_reducesh_mask_round (__A, __B, __C, + _mm_setzero_ph (), + (__mmask8) -1, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, int __E, const int __F) +{ + return __builtin_ia32_reducesh_mask_round (__C, __D, __E, __A, + __B, __F); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + int __D, const int __E) +{ + return __builtin_ia32_reducesh_mask_round (__B, __C, __D, + _mm_setzero_ph (), + __A, __E); +} + +#else +#define _mm_reduce_sh(A, B, C) \ + (__builtin_ia32_reducesh_mask_round ((A), (B), (C), \ + _mm_setzero_ph (), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_reduce_sh(A, B, C, D, E) \ + (__builtin_ia32_reducesh_mask_round ((C), (D), (E), (A), (B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_reduce_sh(A, B, C, D) \ + (__builtin_ia32_reducesh_mask_round ((B), (C), (D), \ + _mm_setzero_ph (), \ + (A), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_reduce_round_sh(A, B, C, D) \ + (__builtin_ia32_reducesh_mask_round ((A), (B), (C), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (D))) + +#define _mm_mask_reduce_round_sh(A, B, C, D, E, F) \ + (__builtin_ia32_reducesh_mask_round ((C), (D), (E), (A), (B), (F))) + +#define _mm_maskz_reduce_round_sh(A, B, C, D, E) \ + (__builtin_ia32_reducesh_mask_round ((B), (C), (D), \ + _mm_setzero_ph (), \ + (A), (E))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vrndscaleph. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m512h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_roundscale_ph (__m512h __A, int __B) +{ + return __builtin_ia32_rndscaleph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_roundscale_ph (__m512h __A, __mmask32 __B, + __m512h __C, int __D) +{ + return __builtin_ia32_rndscaleph512_mask_round (__C, __D, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_roundscale_ph (__mmask32 __A, __m512h __B, int __C) +{ + return __builtin_ia32_rndscaleph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_roundscale_round_ph (__m512h __A, int __B, const int __C) +{ + return __builtin_ia32_rndscaleph512_mask_round (__A, __B, + _mm512_setzero_ph (), + (__mmask32) -1, + __C); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_roundscale_round_ph (__m512h __A, __mmask32 __B, + __m512h __C, int __D, const int __E) +{ + return __builtin_ia32_rndscaleph512_mask_round (__C, __D, __A, + __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_roundscale_round_ph (__mmask32 __A, __m512h __B, int __C, + const int __D) +{ + return __builtin_ia32_rndscaleph512_mask_round (__B, __C, + _mm512_setzero_ph (), + __A, __D); +} + +#else +#define _mm512_roundscale_ph(A, B) \ + (__builtin_ia32_rndscaleph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_ph(A, B, C, D) \ + (__builtin_ia32_rndscaleph512_mask_round ((C), (D), 
(A), (B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_roundscale_ph(A, B, C) \ + (__builtin_ia32_rndscaleph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), \ + _MM_FROUND_CUR_DIRECTION)) +#define _mm512_roundscale_round_ph(A, B, C) \ + (__builtin_ia32_rndscaleph512_mask_round ((A), (B), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, (C))) + +#define _mm512_mask_roundscale_round_ph(A, B, C, D, E) \ + (__builtin_ia32_rndscaleph512_mask_round ((C), (D), (A), (B), (E))) + +#define _mm512_maskz_roundscale_round_ph(A, B, C, D) \ + (__builtin_ia32_rndscaleph512_mask_round ((B), (C), \ + _mm512_setzero_ph (), \ + (A), (D))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vrndscalesh. */ +#ifdef __OPTIMIZE__ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_sh (__m128h __A, __m128h __B, int __C) +{ + return __builtin_ia32_rndscalesh_mask_round (__A, __B, __C, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, int __E) +{ + return __builtin_ia32_rndscalesh_mask_round (__C, __D, __E, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_sh (__mmask8 __A, __m128h __B, __m128h __C, int __D) +{ + return __builtin_ia32_rndscalesh_mask_round (__B, __C, __D, + _mm_setzero_ph (), __A, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_round_sh (__m128h __A, __m128h __B, int __C, const int __D) +{ + return __builtin_ia32_rndscalesh_mask_round (__A, __B, __C, + _mm_setzero_ph (), + (__mmask8) -1, + __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_roundscale_round_sh (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, int __E, const int __F) +{ + return __builtin_ia32_rndscalesh_mask_round (__C, __D, __E, + __A, __B, __F); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_round_sh (__mmask8 __A, __m128h __B, __m128h __C, + int __D, const int __E) +{ + return __builtin_ia32_rndscalesh_mask_round (__B, __C, __D, + _mm_setzero_ph (), + __A, __E); +} + +#else +#define _mm_roundscale_sh(A, B, C) \ + (__builtin_ia32_rndscalesh_mask_round ((A), (B), (C), \ + _mm_setzero_ph (), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_sh(A, B, C, D, E) \ + (__builtin_ia32_rndscalesh_mask_round ((C), (D), (E), (A), (B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_roundscale_sh(A, B, C, D) \ + (__builtin_ia32_rndscalesh_mask_round ((B), (C), (D), \ + _mm_setzero_ph (), \ + (A), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_roundscale_round_sh(A, B, C, D) \ + (__builtin_ia32_rndscalesh_mask_round ((A), (B), (C), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (D))) + +#define _mm_mask_roundscale_round_sh(A, B, C, D, E, F) \ + (__builtin_ia32_rndscalesh_mask_round ((C), (D), (E), (A), (B), (F))) + +#define _mm_maskz_roundscale_round_sh(A, B, C, D, E) \ + (__builtin_ia32_rndscalesh_mask_round ((B), (C), (D), \ + _mm_setzero_ph (), \ + (A), (E))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfpclasssh. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fpclass_sh_mask (__m128h __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasssh_mask ((__v8hf) __A, __imm, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fpclass_sh_mask (__mmask8 __U, __m128h __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasssh_mask ((__v8hf) __A, __imm, __U); +} + +#else +#define _mm_fpclass_sh_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclasssh_mask ((__v8hf) (__m128h) (X), \ + (int) (C), (__mmask8) (-1))) \ + +#define _mm_mask_fpclass_sh_mask(U, X, C) \ + ((__mmask8) __builtin_ia32_fpclasssh_mask ((__v8hf) (__m128h) (X), \ + (int) (C), (__mmask8) (U))) +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfpclassph. */ +#ifdef __OPTIMIZE__ +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fpclass_ph_mask (__mmask32 __U, __m512h __A, + const int __imm) +{ + return (__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) __A, + __imm, __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fpclass_ph_mask (__m512h __A, const int __imm) +{ + return (__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) __A, + __imm, + (__mmask32) -1); +} + +#else +#define _mm512_mask_fpclass_ph_mask(u, x, c) \ + ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \ + (int) (c),(__mmask8)(u))) + +#define _mm512_fpclass_ph_mask(x, c) \ + ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \ + (int) (c),(__mmask8)-1)) +#endif /* __OPIMTIZE__ */ + +/* Intrinsics vgetexpph, vgetexpsh. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_sh (__m128h __A, __m128h __B) +{ + return (__m128h) + __builtin_ia32_getexpsh_mask_round ((__v8hf) __A, (__v8hf) __B, + (__v8hf) _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) + __builtin_ia32_getexpsh_mask_round ((__v8hf) __A, (__v8hf) __B, + (__v8hf) __W, (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_sh (__mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) + __builtin_ia32_getexpsh_mask_round ((__v8hf) __A, (__v8hf) __B, + (__v8hf) _mm_setzero_ph (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getexp_ph (__m512h __A) +{ + return (__m512h) + __builtin_ia32_getexpph512_mask ((__v32hf) __A, + (__v32hf) _mm512_setzero_ph (), + (__mmask32) -1, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getexp_ph (__m512h __W, __mmask32 __U, __m512h __A) +{ + return (__m512h) + __builtin_ia32_getexpph512_mask ((__v32hf) __A, (__v32hf) __W, + (__mmask32) __U, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getexp_ph (__mmask32 __U, __m512h __A) +{ + return (__m512h) + __builtin_ia32_getexpph512_mask ((__v32hf) __A, + (__v32hf) _mm512_setzero_ph (), + (__mmask32) __U, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_round_sh (__m128h __A, __m128h 
__B, const int __R) +{ + return (__m128h) __builtin_ia32_getexpsh_mask_round ((__v8hf) __A, + (__v8hf) __B, + _mm_setzero_ph (), + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_round_sh (__m128h __W, __mmask8 __U, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_getexpsh_mask_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __W, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_round_sh (__mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_getexpsh_mask_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) + _mm_setzero_ph (), + (__mmask8) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getexp_round_ph (__m512h __A, const int __R) +{ + return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A, + (__v32hf) + _mm512_setzero_ph (), + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getexp_round_ph (__m512h __W, __mmask32 __U, __m512h __A, + const int __R) +{ + return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A, + (__v32hf) __W, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getexp_round_ph (__mmask32 __U, __m512h __A, const int __R) +{ + return (__m512h) __builtin_ia32_getexpph512_mask ((__v32hf) __A, + (__v32hf) + _mm512_setzero_ph (), + (__mmask32) __U, __R); +} + +#else +#define _mm_getexp_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_getexpsh_mask_round((__v8hf)(__m128h)(A), \ + (__v8hf)(__m128h)(B), \ + (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, R)) + +#define _mm_mask_getexp_round_sh(W, U, A, B, C) \ + (__m128h)__builtin_ia32_getexpsh_mask_round(A, B, W, 
U, C) + +#define _mm_maskz_getexp_round_sh(U, A, B, C) \ + (__m128h)__builtin_ia32_getexpsh_mask_round(A, B, \ + (__v8hf)_mm_setzero_ph(), \ + U, C) + +#define _mm512_getexp_round_ph(A, R) \ + ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)_mm512_setzero_ph(), (__mmask32)-1, R)) + +#define _mm512_mask_getexp_round_ph(W, U, A, R) \ + ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(W), (__mmask32)(U), R)) + +#define _mm512_maskz_getexp_round_ph(U, A, R) \ + ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), R)) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vgetmantph, vgetmantsh. */ +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_sh (__m128h __A, __m128h __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128h) + __builtin_ia32_getmantsh_mask_round ((__v8hf) __A, (__v8hf) __B, + (__D << 2) | __C, _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_sh (__m128h __W, __mmask8 __U, __m128h __A, + __m128h __B, _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128h) + __builtin_ia32_getmantsh_mask_round ((__v8hf) __A, (__v8hf) __B, + (__D << 2) | __C, (__v8hf) __W, + __U, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_sh (__mmask8 __U, __m128h __A, __m128h __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D) +{ + return (__m128h) + __builtin_ia32_getmantsh_mask_round ((__v8hf) __A, (__v8hf) __B, + (__D << 2) | __C, + (__v8hf) _mm_setzero_ph(), + __U, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_getmant_ph (__m512h __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A, + (__C << 2) | __B, + _mm512_setzero_ph (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getmant_ph (__m512h __W, __mmask32 __U, __m512h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A, + (__C << 2) | __B, + (__v32hf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getmant_ph (__mmask32 __U, __m512h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A, + (__C << 2) | __B, + (__v32hf) + _mm512_setzero_ph (), + __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_round_sh (__m128h __A, __m128h __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128h) __builtin_ia32_getmantsh_mask_round ((__v8hf) __A, + (__v8hf) __B, + (__D << 2) | __C, + _mm_setzero_ph (), + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_round_sh (__m128h __W, __mmask8 __U, __m128h __A, + __m128h __B, _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, const int __R) +{ + return (__m128h) __builtin_ia32_getmantsh_mask_round ((__v8hf) __A, + (__v8hf) __B, + (__D << 2) | __C, + (__v8hf) __W, + __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_round_sh (__mmask8 __U, __m128h __A, __m128h __B, + _MM_MANTISSA_NORM_ENUM __C, + _MM_MANTISSA_SIGN_ENUM __D, 
const int __R) +{ + return (__m128h) __builtin_ia32_getmantsh_mask_round ((__v8hf) __A, + (__v8hf) __B, + (__D << 2) | __C, + (__v8hf) + _mm_setzero_ph(), + __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_getmant_round_ph (__m512h __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A, + (__C << 2) | __B, + _mm512_setzero_ph (), + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_getmant_round_ph (__m512h __W, __mmask32 __U, __m512h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A, + (__C << 2) | __B, + (__v32hf) __W, __U, + __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_getmant_round_ph (__mmask32 __U, __m512h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C, const int __R) +{ + return (__m512h) __builtin_ia32_getmantph512_mask ((__v32hf) __A, + (__C << 2) | __B, + (__v32hf) + _mm512_setzero_ph (), + __U, __R); +} + +#else +#define _mm512_getmant_ph(X, B, C) \ + ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \ + (int)(((C)<<2) | (B)), \ + (__v32hf)(__m512h) \ + _mm512_setzero_ph(), \ + (__mmask32)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getmant_ph(W, U, X, B, C) \ + ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \ + (int)(((C)<<2) | (B)), \ + (__v32hf)(__m512h)(W), \ + (__mmask32)(U), \ + _MM_FROUND_CUR_DIRECTION)) + + +#define _mm512_maskz_getmant_ph(U, X, B, C) \ + ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \ + (int)(((C)<<2) | (B)), \ + (__v32hf)(__m512h) \ + _mm512_setzero_ph(), \ + (__mmask32)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define 
_mm_getmant_sh(X, Y, C, D) \ + ((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v8hf)(__m128h) \ + _mm_setzero_ph (), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_sh(W, U, X, Y, C, D) \ + ((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v8hf)(__m128h)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_getmant_sh(U, X, Y, C, D) \ + ((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v8hf)(__m128h) \ + _mm_setzero_ph(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_getmant_round_ph(X, B, C, R) \ + ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \ + (int)(((C)<<2) | (B)), \ + (__v32hf)(__m512h) \ + _mm512_setzero_ph(), \ + (__mmask32)-1, \ + (R))) + +#define _mm512_mask_getmant_round_ph(W, U, X, B, C, R) \ + ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \ + (int)(((C)<<2) | (B)), \ + (__v32hf)(__m512h)(W), \ + (__mmask32)(U), \ + (R))) + + +#define _mm512_maskz_getmant_round_ph(U, X, B, C, R) \ + ((__m512h)__builtin_ia32_getmantph512_mask ((__v32hf)(__m512h)(X), \ + (int)(((C)<<2) | (B)), \ + (__v32hf)(__m512h) \ + _mm512_setzero_ph(), \ + (__mmask32)(U), \ + (R))) + +#define _mm_getmant_round_sh(X, Y, C, D, R) \ + ((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v8hf)(__m128h) \ + _mm_setzero_ph (), \ + (__mmask8)-1, \ + (R))) + +#define _mm_mask_getmant_round_sh(W, U, X, Y, C, D, R) \ + ((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v8hf)(__m128h)(W), \ + (__mmask8)(U), \ + (R))) + +#define _mm_maskz_getmant_round_sh(U, X, Y, C, D, R) \ + 
((__m128h)__builtin_ia32_getmantsh_mask_round ((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), \ + (int)(((D)<<2) | (C)), \ + (__v8hf)(__m128h) \ + _mm_setzero_ph(), \ + (__mmask8)(U), \ + (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vmovw. */ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi16_si128 (short __A) +{ + return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, __A); +} + +extern __inline short +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si16 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, 0); +} + +/* Intrinsics vmovsh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_sh (__m128h __A, __mmask8 __B, _Float16 const* __C) +{ + return __builtin_ia32_loadsh_mask (__C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_sh (__mmask8 __A, _Float16 const* __B) +{ + return __builtin_ia32_loadsh_mask (__B, _mm_setzero_ph (), __A); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_store_sh (_Float16 const* __A, __mmask8 __B, __m128h __C) +{ + __builtin_ia32_storesh_mask (__A, __C, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_sh (__m128h __A, __m128h __B) +{ + __A[0] = __B[0]; + return __A; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_move_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_vmovsh_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_move_sh (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_vmovsh_mask (__B, __C, _mm_setzero_ph (), __A); +} + +/* Intrinsics vcvtph2dq. 
*/ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_epi32 (__m256h __A) +{ + return (__m512i) + __builtin_ia32_vcvtph2dq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_epi32 (__m512i __A, __mmask16 __B, __m256h __C) +{ + return (__m512i) + __builtin_ia32_vcvtph2dq512_mask_round (__C, + (__v16si) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_epi32 (__mmask16 __A, __m256h __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2dq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_epi32 (__m256h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2dq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_epi32 (__m512i __A, __mmask16 __B, __m256h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvtph2dq512_mask_round (__C, + (__v16si) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_epi32 (__mmask16 __A, __m256h __B, int __C) +{ + return (__m512i) + __builtin_ia32_vcvtph2dq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_epi32(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvtph2dq512_mask_round ((A), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (__mmask16)-1, \ + (B))) + +#define _mm512_mask_cvt_roundph_epi32(A, B, C, D) \ + 
((__m512i) \ + __builtin_ia32_vcvtph2dq512_mask_round ((C), (__v16si)(A), (B), (D))) + +#define _mm512_maskz_cvt_roundph_epi32(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvtph2dq512_mask_round ((B), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2udq. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_epu32 (__m256h __A) +{ + return (__m512i) + __builtin_ia32_vcvtph2udq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_epu32 (__m512i __A, __mmask16 __B, __m256h __C) +{ + return (__m512i) + __builtin_ia32_vcvtph2udq512_mask_round (__C, + (__v16si) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_epu32 (__mmask16 __A, __m256h __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2udq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_epu32 (__m256h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2udq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_epu32 (__m512i __A, __mmask16 __B, __m256h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvtph2udq512_mask_round (__C, + (__v16si) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_epu32 (__mmask16 __A, __m256h __B, int __C) +{ + return (__m512i) + 
__builtin_ia32_vcvtph2udq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_epu32(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvtph2udq512_mask_round ((A), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (__mmask16)-1, \ + (B))) + +#define _mm512_mask_cvt_roundph_epu32(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vcvtph2udq512_mask_round ((C), (__v16si)(A), (B), (D))) + +#define _mm512_maskz_cvt_roundph_epu32(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvtph2udq512_mask_round ((B), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvttph2dq. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttph_epi32 (__m256h __A) +{ + return (__m512i) + __builtin_ia32_vcvttph2dq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttph_epi32 (__m512i __A, __mmask16 __B, __m256h __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2dq512_mask_round (__C, + (__v16si) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttph_epi32 (__mmask16 __A, __m256h __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2dq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundph_epi32 (__m256h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2dq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundph_epi32 (__m512i __A, 
__mmask16 __B, + __m256h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvttph2dq512_mask_round (__C, + (__v16si) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundph_epi32 (__mmask16 __A, __m256h __B, int __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2dq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvtt_roundph_epi32(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvttph2dq512_mask_round ((A), \ + (__v16si) \ + (_mm512_setzero_si512 ()), \ + (__mmask16)(-1), (B))) + +#define _mm512_mask_cvtt_roundph_epi32(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vcvttph2dq512_mask_round ((C), \ + (__v16si)(A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvtt_roundph_epi32(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvttph2dq512_mask_round ((B), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvttph2udq. 
*/ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttph_epu32 (__m256h __A) +{ + return (__m512i) + __builtin_ia32_vcvttph2udq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttph_epu32 (__m512i __A, __mmask16 __B, __m256h __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2udq512_mask_round (__C, + (__v16si) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttph_epu32 (__mmask16 __A, __m256h __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2udq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundph_epu32 (__m256h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2udq512_mask_round (__A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundph_epu32 (__m512i __A, __mmask16 __B, + __m256h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvttph2udq512_mask_round (__C, + (__v16si) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundph_epu32 (__mmask16 __A, __m256h __B, int __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2udq512_mask_round (__B, + (__v16si) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvtt_roundph_epu32(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvttph2udq512_mask_round ((A), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (__mmask16)-1, \ + (B))) + +#define 
_mm512_mask_cvtt_roundph_epu32(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vcvttph2udq512_mask_round ((C), \ + (__v16si)(A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvtt_roundph_epu32(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvttph2udq512_mask_round ((B), \ + (__v16si) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtdq2ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi32_ph (__m512i __A) +{ + return __builtin_ia32_vcvtdq2ph512_mask_round ((__v16si) __A, + _mm256_setzero_ph (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi32_ph (__m256h __A, __mmask16 __B, __m512i __C) +{ + return __builtin_ia32_vcvtdq2ph512_mask_round ((__v16si) __C, + __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi32_ph (__mmask16 __A, __m512i __B) +{ + return __builtin_ia32_vcvtdq2ph512_mask_round ((__v16si) __B, + _mm256_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepi32_ph (__m512i __A, int __B) +{ + return __builtin_ia32_vcvtdq2ph512_mask_round ((__v16si) __A, + _mm256_setzero_ph (), + (__mmask16) -1, + __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepi32_ph (__m256h __A, __mmask16 __B, __m512i __C, int __D) +{ + return __builtin_ia32_vcvtdq2ph512_mask_round ((__v16si) __C, + __A, + __B, + __D); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepi32_ph (__mmask16 __A, __m512i __B, int __C) +{ + return __builtin_ia32_vcvtdq2ph512_mask_round ((__v16si) 
__B, + _mm256_setzero_ph (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundepi32_ph(A, B) \ + (__builtin_ia32_vcvtdq2ph512_mask_round ((__v16si)(A), \ + _mm256_setzero_ph (), \ + (__mmask16)-1, \ + (B))) + +#define _mm512_mask_cvt_roundepi32_ph(A, B, C, D) \ + (__builtin_ia32_vcvtdq2ph512_mask_round ((__v16si)(C), \ + (A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvt_roundepi32_ph(A, B, C) \ + (__builtin_ia32_vcvtdq2ph512_mask_round ((__v16si)(B), \ + _mm256_setzero_ph (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtudq2ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu32_ph (__m512i __A) +{ + return __builtin_ia32_vcvtudq2ph512_mask_round ((__v16si) __A, + _mm256_setzero_ph (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu32_ph (__m256h __A, __mmask16 __B, __m512i __C) +{ + return __builtin_ia32_vcvtudq2ph512_mask_round ((__v16si) __C, + __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu32_ph (__mmask16 __A, __m512i __B) +{ + return __builtin_ia32_vcvtudq2ph512_mask_round ((__v16si) __B, + _mm256_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepu32_ph (__m512i __A, int __B) +{ + return __builtin_ia32_vcvtudq2ph512_mask_round ((__v16si) __A, + _mm256_setzero_ph (), + (__mmask16) -1, + __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepu32_ph (__m256h __A, __mmask16 __B, __m512i __C, int __D) +{ + return __builtin_ia32_vcvtudq2ph512_mask_round ((__v16si) __C, + __A, + __B, + __D); +} + +extern __inline __m256h +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepu32_ph (__mmask16 __A, __m512i __B, int __C) +{ + return __builtin_ia32_vcvtudq2ph512_mask_round ((__v16si) __B, + _mm256_setzero_ph (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundepu32_ph(A, B) \ + (__builtin_ia32_vcvtudq2ph512_mask_round ((__v16si)(A), \ + _mm256_setzero_ph (), \ + (__mmask16)-1, \ + B)) + +#define _mm512_mask_cvt_roundepu32_ph(A, B, C, D) \ + (__builtin_ia32_vcvtudq2ph512_mask_round ((__v16si)C, \ + A, \ + B, \ + D)) + +#define _mm512_maskz_cvt_roundepu32_ph(A, B, C) \ + (__builtin_ia32_vcvtudq2ph512_mask_round ((__v16si)B, \ + _mm256_setzero_ph (), \ + A, \ + C)) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2qq. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_epi64 (__m128h __A) +{ + return __builtin_ia32_vcvtph2qq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_epi64 (__m512i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2qq512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_epi64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2qq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_epi64 (__m128h __A, int __B) +{ + return __builtin_ia32_vcvtph2qq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_epi64 (__m512i __A, __mmask8 __B, __m128h __C, int __D) +{ + 
return __builtin_ia32_vcvtph2qq512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_epi64 (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_vcvtph2qq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_epi64(A, B) \ + (__builtin_ia32_vcvtph2qq512_mask_round ((A), \ + _mm512_setzero_si512 (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvt_roundph_epi64(A, B, C, D) \ + (__builtin_ia32_vcvtph2qq512_mask_round ((C), (A), (B), (D))) + +#define _mm512_maskz_cvt_roundph_epi64(A, B, C) \ + (__builtin_ia32_vcvtph2qq512_mask_round ((B), \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2uqq. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_epu64 (__m128h __A) +{ + return __builtin_ia32_vcvtph2uqq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_epu64 (__m512i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2uqq512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_epu64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2uqq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_epu64 (__m128h __A, int __B) +{ + return __builtin_ia32_vcvtph2uqq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_mask_cvt_roundph_epu64 (__m512i __A, __mmask8 __B, __m128h __C, int __D) +{ + return __builtin_ia32_vcvtph2uqq512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_epu64 (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_vcvtph2uqq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_epu64(A, B) \ + (__builtin_ia32_vcvtph2uqq512_mask_round ((A), \ + _mm512_setzero_si512 (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvt_roundph_epu64(A, B, C, D) \ + (__builtin_ia32_vcvtph2uqq512_mask_round ((C), (A), (B), (D))) + +#define _mm512_maskz_cvt_roundph_epu64(A, B, C) \ + (__builtin_ia32_vcvtph2uqq512_mask_round ((B), \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvttph2qq. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttph_epi64 (__m128h __A) +{ + return __builtin_ia32_vcvttph2qq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttph_epi64 (__m512i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvttph2qq512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttph_epi64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvttph2qq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundph_epi64 (__m128h __A, int __B) +{ + return __builtin_ia32_vcvttph2qq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + __B); +} + 
+extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundph_epi64 (__m512i __A, __mmask8 __B, __m128h __C, int __D) +{ + return __builtin_ia32_vcvttph2qq512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundph_epi64 (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_vcvttph2qq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvtt_roundph_epi64(A, B) \ + (__builtin_ia32_vcvttph2qq512_mask_round ((A), \ + _mm512_setzero_si512 (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvtt_roundph_epi64(A, B, C, D) \ + __builtin_ia32_vcvttph2qq512_mask_round ((C), (A), (B), (D)) + +#define _mm512_maskz_cvtt_roundph_epi64(A, B, C) \ + (__builtin_ia32_vcvttph2qq512_mask_round ((B), \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvttph2uqq. 
*/ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttph_epu64 (__m128h __A) +{ + return __builtin_ia32_vcvttph2uqq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttph_epu64 (__m512i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvttph2uqq512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttph_epu64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvttph2uqq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundph_epu64 (__m128h __A, int __B) +{ + return __builtin_ia32_vcvttph2uqq512_mask_round (__A, + _mm512_setzero_si512 (), + (__mmask8) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundph_epu64 (__m512i __A, __mmask8 __B, __m128h __C, int __D) +{ + return __builtin_ia32_vcvttph2uqq512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundph_epu64 (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_vcvttph2uqq512_mask_round (__B, + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvtt_roundph_epu64(A, B) \ + (__builtin_ia32_vcvttph2uqq512_mask_round ((A), \ + _mm512_setzero_si512 (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvtt_roundph_epu64(A, B, C, D) \ + __builtin_ia32_vcvttph2uqq512_mask_round ((C), (A), (B), (D)) + +#define _mm512_maskz_cvtt_roundph_epu64(A, B, C) \ + (__builtin_ia32_vcvttph2uqq512_mask_round ((B), \ 
+ _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtqq2ph. */ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi64_ph (__m512i __A) +{ + return __builtin_ia32_vcvtqq2ph512_mask_round ((__v8di) __A, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi64_ph (__m128h __A, __mmask8 __B, __m512i __C) +{ + return __builtin_ia32_vcvtqq2ph512_mask_round ((__v8di) __C, + __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi64_ph (__mmask8 __A, __m512i __B) +{ + return __builtin_ia32_vcvtqq2ph512_mask_round ((__v8di) __B, + _mm_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepi64_ph (__m512i __A, int __B) +{ + return __builtin_ia32_vcvtqq2ph512_mask_round ((__v8di) __A, + _mm_setzero_ph (), + (__mmask8) -1, + __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepi64_ph (__m128h __A, __mmask8 __B, __m512i __C, int __D) +{ + return __builtin_ia32_vcvtqq2ph512_mask_round ((__v8di) __C, + __A, + __B, + __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepi64_ph (__mmask8 __A, __m512i __B, int __C) +{ + return __builtin_ia32_vcvtqq2ph512_mask_round ((__v8di) __B, + _mm_setzero_ph (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundepi64_ph(A, B) \ + (__builtin_ia32_vcvtqq2ph512_mask_round ((__v8di)(A), \ + _mm_setzero_ph (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvt_roundepi64_ph(A, B, C, D) \ + (__builtin_ia32_vcvtqq2ph512_mask_round 
((__v8di)(C), (A), (B), (D))) + +#define _mm512_maskz_cvt_roundepi64_ph(A, B, C) \ + (__builtin_ia32_vcvtqq2ph512_mask_round ((__v8di)(B), \ + _mm_setzero_ph (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtuqq2ph. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepu64_ph (__m512i __A) +{ + return __builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di) __A, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu64_ph (__m128h __A, __mmask8 __B, __m512i __C) +{ + return __builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di) __C, + __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu64_ph (__mmask8 __A, __m512i __B) +{ + return __builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di) __B, + _mm_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepu64_ph (__m512i __A, int __B) +{ + return __builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di) __A, + _mm_setzero_ph (), + (__mmask8) -1, + __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepu64_ph (__m128h __A, __mmask8 __B, __m512i __C, int __D) +{ + return __builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di) __C, + __A, + __B, + __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepu64_ph (__mmask8 __A, __m512i __B, int __C) +{ + return __builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di) __B, + _mm_setzero_ph (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundepu64_ph(A, B) \ + (__builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di)(A), \ + 
_mm_setzero_ph (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvt_roundepu64_ph(A, B, C, D) \ + (__builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di)(C), (A), (B), (D))) + +#define _mm512_maskz_cvt_roundepu64_ph(A, B, C) \ + (__builtin_ia32_vcvtuqq2ph512_mask_round ((__v8di)(B), \ + _mm_setzero_ph (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2w. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_epi16 (__m512h __A) +{ + return (__m512i) + __builtin_ia32_vcvtph2w512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_epi16 (__m512i __A, __mmask32 __B, __m512h __C) +{ + return (__m512i) + __builtin_ia32_vcvtph2w512_mask_round (__C, + (__v32hi) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_epi16 (__mmask32 __A, __m512h __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2w512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_epi16 (__m512h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2w512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_epi16 (__m512i __A, __mmask32 __B, __m512h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvtph2w512_mask_round (__C, + (__v32hi) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_epi16 (__mmask32 __A, __m512h __B, int 
__C) +{ + return (__m512i) + __builtin_ia32_vcvtph2w512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_epi16(A, B) \ + ((__m512i)__builtin_ia32_vcvtph2w512_mask_round ((A), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1, \ + (B))) + +#define _mm512_mask_cvt_roundph_epi16(A, B, C, D) \ + ((__m512i)__builtin_ia32_vcvtph2w512_mask_round ((C), \ + (__v32hi)(A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvt_roundph_epi16(A, B, C) \ + ((__m512i)__builtin_ia32_vcvtph2w512_mask_round ((B), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2uw. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_epu16 (__m512h __A) +{ + return (__m512i) + __builtin_ia32_vcvtph2uw512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_epu16 (__m512i __A, __mmask32 __B, __m512h __C) +{ + return (__m512i) + __builtin_ia32_vcvtph2uw512_mask_round (__C, (__v32hi) __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_epu16 (__mmask32 __A, __m512h __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2uw512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_epu16 (__m512h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvtph2uw512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_epu16 (__m512i __A, 
__mmask32 __B, __m512h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvtph2uw512_mask_round (__C, (__v32hi) __A, __B, __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_epu16 (__mmask32 __A, __m512h __B, int __C) +{ + return (__m512i) + __builtin_ia32_vcvtph2uw512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_epu16(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvtph2uw512_mask_round ((A), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1, (B))) + +#define _mm512_mask_cvt_roundph_epu16(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vcvtph2uw512_mask_round ((C), (__v32hi)(A), (B), (D))) + +#define _mm512_maskz_cvt_roundph_epu16(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvtph2uw512_mask_round ((B), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvttph2w. */ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttph_epi16 (__m512h __A) +{ + return (__m512i) + __builtin_ia32_vcvttph2w512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttph_epi16 (__m512i __A, __mmask32 __B, __m512h __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2w512_mask_round (__C, + (__v32hi) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttph_epi16 (__mmask32 __A, __m512h __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2w512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_cvtt_roundph_epi16 (__m512h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2w512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundph_epi16 (__m512i __A, __mmask32 __B, + __m512h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvttph2w512_mask_round (__C, + (__v32hi) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundph_epi16 (__mmask32 __A, __m512h __B, int __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2w512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvtt_roundph_epi16(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvttph2w512_mask_round ((A), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1, \ + (B))) + +#define _mm512_mask_cvtt_roundph_epi16(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vcvttph2w512_mask_round ((C), \ + (__v32hi)(A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvtt_roundph_epi16(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvttph2w512_mask_round ((B), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvttph2uw. 
*/ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvttph_epu16 (__m512h __A) +{ + return (__m512i) + __builtin_ia32_vcvttph2uw512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvttph_epu16 (__m512i __A, __mmask32 __B, __m512h __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2uw512_mask_round (__C, + (__v32hi) __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvttph_epu16 (__mmask32 __A, __m512h __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2uw512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtt_roundph_epu16 (__m512h __A, int __B) +{ + return (__m512i) + __builtin_ia32_vcvttph2uw512_mask_round (__A, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) -1, + __B); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtt_roundph_epu16 (__m512i __A, __mmask32 __B, + __m512h __C, int __D) +{ + return (__m512i) + __builtin_ia32_vcvttph2uw512_mask_round (__C, + (__v32hi) __A, + __B, + __D); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtt_roundph_epu16 (__mmask32 __A, __m512h __B, int __C) +{ + return (__m512i) + __builtin_ia32_vcvttph2uw512_mask_round (__B, + (__v32hi) + _mm512_setzero_si512 (), + __A, + __C); +} + +#else +#define _mm512_cvtt_roundph_epu16(A, B) \ + ((__m512i) \ + __builtin_ia32_vcvttph2uw512_mask_round ((A), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (__mmask32)-1, \ + (B))) + +#define _mm512_mask_cvtt_roundph_epu16(A, 
B, C, D) \ + ((__m512i) \ + __builtin_ia32_vcvttph2uw512_mask_round ((C), \ + (__v32hi)(A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvtt_roundph_epu16(A, B, C) \ + ((__m512i) \ + __builtin_ia32_vcvttph2uw512_mask_round ((B), \ + (__v32hi) \ + _mm512_setzero_si512 (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtw2ph. */ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtepi16_ph (__m512i __A) +{ + return __builtin_ia32_vcvtw2ph512_mask_round ((__v32hi) __A, + _mm512_setzero_ph (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepi16_ph (__m512h __A, __mmask32 __B, __m512i __C) +{ + return __builtin_ia32_vcvtw2ph512_mask_round ((__v32hi) __C, + __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepi16_ph (__mmask32 __A, __m512i __B) +{ + return __builtin_ia32_vcvtw2ph512_mask_round ((__v32hi) __B, + _mm512_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepi16_ph (__m512i __A, int __B) +{ + return __builtin_ia32_vcvtw2ph512_mask_round ((__v32hi) __A, + _mm512_setzero_ph (), + (__mmask32) -1, + __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepi16_ph (__m512h __A, __mmask32 __B, __m512i __C, int __D) +{ + return __builtin_ia32_vcvtw2ph512_mask_round ((__v32hi) __C, + __A, + __B, + __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundepi16_ph (__mmask32 __A, __m512i __B, int __C) +{ + return __builtin_ia32_vcvtw2ph512_mask_round ((__v32hi) __B, + _mm512_setzero_ph (), + __A, + __C); +} 
+ +#else +#define _mm512_cvt_roundepi16_ph(A, B) \ + (__builtin_ia32_vcvtw2ph512_mask_round ((__v32hi)(A), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, \ + (B))) + +#define _mm512_mask_cvt_roundepi16_ph(A, B, C, D) \ + (__builtin_ia32_vcvtw2ph512_mask_round ((__v32hi)(C), \ + (A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvt_roundepi16_ph(A, B, C) \ + (__builtin_ia32_vcvtw2ph512_mask_round ((__v32hi)(B), \ + _mm512_setzero_ph (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtuw2ph. */ + extern __inline __m512h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) + _mm512_cvtepu16_ph (__m512i __A) + { + return __builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi) __A, + _mm512_setzero_ph (), + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); + } + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtepu16_ph (__m512h __A, __mmask32 __B, __m512i __C) +{ + return __builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi) __C, + __A, + __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtepu16_ph (__mmask32 __A, __m512i __B) +{ + return __builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi) __B, + _mm512_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundepu16_ph (__m512i __A, int __B) +{ + return __builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi) __A, + _mm512_setzero_ph (), + (__mmask32) -1, + __B); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundepu16_ph (__m512h __A, __mmask32 __B, __m512i __C, int __D) +{ + return __builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi) __C, + __A, + __B, + __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_maskz_cvt_roundepu16_ph (__mmask32 __A, __m512i __B, int __C) +{ + return __builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi) __B, + _mm512_setzero_ph (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundepu16_ph(A, B) \ + (__builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi)(A), \ + _mm512_setzero_ph (), \ + (__mmask32)-1, \ + (B))) + +#define _mm512_mask_cvt_roundepu16_ph(A, B, C, D) \ + (__builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi)(C), \ + (A), \ + (B), \ + (D))) + +#define _mm512_maskz_cvt_roundepu16_ph(A, B, C) \ + (__builtin_ia32_vcvtuw2ph512_mask_round ((__v32hi)(B), \ + _mm512_setzero_ph (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtsh2si, vcvtsh2us. */ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_i32 (__m128h __A) +{ + return (int) __builtin_ia32_vcvtsh2si32_round (__A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_u32 (__m128h __A) +{ + return (int) __builtin_ia32_vcvtsh2usi32_round (__A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsh_i32 (__m128h __A, const int __R) +{ + return (int) __builtin_ia32_vcvtsh2si32_round (__A, __R); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsh_u32 (__m128h __A, const int __R) +{ + return (int) __builtin_ia32_vcvtsh2usi32_round (__A, __R); +} + +#else +#define _mm_cvt_roundsh_i32(A, B) \ + ((int)__builtin_ia32_vcvtsh2si32_round ((A), (B))) +#define _mm_cvt_roundsh_u32(A, B) \ + ((int)__builtin_ia32_vcvtsh2usi32_round ((A), (B))) + +#endif /* __OPTIMIZE__ */ + +#ifdef __x86_64__ +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_i64 (__m128h __A) +{ + return (long long) + __builtin_ia32_vcvtsh2si64_round 
(__A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_u64 (__m128h __A) +{ + return (long long) + __builtin_ia32_vcvtsh2usi64_round (__A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsh_i64 (__m128h __A, const int __R) +{ + return (long long) __builtin_ia32_vcvtsh2si64_round (__A, __R); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsh_u64 (__m128h __A, const int __R) +{ + return (long long) __builtin_ia32_vcvtsh2usi64_round (__A, __R); +} + +#else +#define _mm_cvt_roundsh_i64(A, B) \ + ((long long)__builtin_ia32_vcvtsh2si64_round ((A), (B))) +#define _mm_cvt_roundsh_u64(A, B) \ + ((long long)__builtin_ia32_vcvtsh2usi64_round ((A), (B))) + +#endif /* __OPTIMIZE__ */ +#endif /* __x86_64__ */ + +/* Intrinsics vcvttsh2si, vcvttsh2us. 
*/ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsh_i32 (__m128h __A) +{ + return (int) + __builtin_ia32_vcvttsh2si32_round (__A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsh_u32 (__m128h __A) +{ + return (int) + __builtin_ia32_vcvttsh2usi32_round (__A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline int +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsh_i32 (__m128h __A, const int __R) +{ + return (int) __builtin_ia32_vcvttsh2si32_round (__A, __R); +} + +extern __inline unsigned +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsh_u32 (__m128h __A, const int __R) +{ + return (int) __builtin_ia32_vcvttsh2usi32_round (__A, __R); +} + +#else +#define _mm_cvtt_roundsh_i32(A, B) \ + ((int)__builtin_ia32_vcvttsh2si32_round ((A), (B))) +#define _mm_cvtt_roundsh_u32(A, B) \ + ((int)__builtin_ia32_vcvttsh2usi32_round ((A), (B))) + +#endif /* __OPTIMIZE__ */ + +#ifdef __x86_64__ +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsh_i64 (__m128h __A) +{ + return (long long) + __builtin_ia32_vcvttsh2si64_round (__A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsh_u64 (__m128h __A) +{ + return (long long) + __builtin_ia32_vcvttsh2usi64_round (__A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsh_i64 (__m128h __A, const int __R) +{ + return (long long) __builtin_ia32_vcvttsh2si64_round (__A, __R); +} + +extern __inline unsigned long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_roundsh_u64 (__m128h __A, const int __R) +{ + return (long 
long) __builtin_ia32_vcvttsh2usi64_round (__A, __R); +} + +#else +#define _mm_cvtt_roundsh_i64(A, B) \ + ((long long)__builtin_ia32_vcvttsh2si64_round ((A), (B))) +#define _mm_cvtt_roundsh_u64(A, B) \ + ((long long)__builtin_ia32_vcvttsh2usi64_round ((A), (B))) + +#endif /* __OPTIMIZE__ */ +#endif /* __x86_64__ */ + +/* Intrinsics vcvtsi2sh, vcvtusi2sh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvti32_sh (__m128h __A, int __B) +{ + return __builtin_ia32_vcvtsi2sh32_round (__A, __B, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtu32_sh (__m128h __A, unsigned int __B) +{ + return __builtin_ia32_vcvtusi2sh32_round (__A, __B, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundi32_sh (__m128h __A, int __B, const int __R) +{ + return __builtin_ia32_vcvtsi2sh32_round (__A, __B, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundu32_sh (__m128h __A, unsigned int __B, const int __R) +{ + return __builtin_ia32_vcvtusi2sh32_round (__A, __B, __R); +} + +#else +#define _mm_cvt_roundi32_sh(A, B, C) \ + (__builtin_ia32_vcvtsi2sh32_round ((A), (B), (C))) +#define _mm_cvt_roundu32_sh(A, B, C) \ + (__builtin_ia32_vcvtusi2sh32_round ((A), (B), (C))) + +#endif /* __OPTIMIZE__ */ + +#ifdef __x86_64__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvti64_sh (__m128h __A, long long __B) +{ + return __builtin_ia32_vcvtsi2sh64_round (__A, __B, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtu64_sh (__m128h __A, unsigned long long __B) +{ + return __builtin_ia32_vcvtusi2sh64_round (__A, __B, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef 
__OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundi64_sh (__m128h __A, long long __B, const int __R) +{ + return __builtin_ia32_vcvtsi2sh64_round (__A, __B, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundu64_sh (__m128h __A, unsigned long long __B, const int __R) +{ + return __builtin_ia32_vcvtusi2sh64_round (__A, __B, __R); +} + +#else +#define _mm_cvt_roundi64_sh(A, B, C) \ + (__builtin_ia32_vcvtsi2sh64_round ((A), (B), (C))) +#define _mm_cvt_roundu64_sh(A, B, C) \ + (__builtin_ia32_vcvtusi2sh64_round ((A), (B), (C))) + +#endif /* __OPTIMIZE__ */ +#endif /* __x86_64__ */ + +/* Intrinsics vcvtph2pd. */ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtph_pd (__m128h __A) +{ + return __builtin_ia32_vcvtph2pd512_mask_round (__A, + _mm512_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtph_pd (__m512d __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2pd512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtph_pd (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2pd512_mask_round (__B, + _mm512_setzero_pd (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundph_pd (__m128h __A, int __B) +{ + return __builtin_ia32_vcvtph2pd512_mask_round (__A, + _mm512_setzero_pd (), + (__mmask8) -1, + __B); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundph_pd (__m512d __A, __mmask8 __B, __m128h __C, int __D) +{ + return 
__builtin_ia32_vcvtph2pd512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundph_pd (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_vcvtph2pd512_mask_round (__B, + _mm512_setzero_pd (), + __A, + __C); +} + +#else +#define _mm512_cvt_roundph_pd(A, B) \ + (__builtin_ia32_vcvtph2pd512_mask_round ((A), \ + _mm512_setzero_pd (), \ + (__mmask8)-1, \ + (B))) + +#define _mm512_mask_cvt_roundph_pd(A, B, C, D) \ + (__builtin_ia32_vcvtph2pd512_mask_round ((C), (A), (B), (D))) + +#define _mm512_maskz_cvt_roundph_pd(A, B, C) \ + (__builtin_ia32_vcvtph2pd512_mask_round ((B), \ + _mm512_setzero_pd (), \ + (A), \ + (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2psx. */ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtxph_ps (__m256h __A) +{ + return __builtin_ia32_vcvtph2psx512_mask_round (__A, + _mm512_setzero_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtxph_ps (__m512 __A, __mmask16 __B, __m256h __C) +{ + return __builtin_ia32_vcvtph2psx512_mask_round (__C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtxph_ps (__mmask16 __A, __m256h __B) +{ + return __builtin_ia32_vcvtph2psx512_mask_round (__B, + _mm512_setzero_ps (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtx_roundph_ps (__m256h __A, int __B) +{ + return __builtin_ia32_vcvtph2psx512_mask_round (__A, + _mm512_setzero_ps (), + (__mmask16) -1, + __B); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtx_roundph_ps (__m512 __A, 
__mmask16 __B, __m256h __C, int __D) +{ + return __builtin_ia32_vcvtph2psx512_mask_round (__C, __A, __B, __D); +} + +extern __inline __m512 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtx_roundph_ps (__mmask16 __A, __m256h __B, int __C) +{ + return __builtin_ia32_vcvtph2psx512_mask_round (__B, + _mm512_setzero_ps (), + __A, + __C); +} + +#else +#define _mm512_cvtx_roundph_ps(A, B) \ + (__builtin_ia32_vcvtph2psx512_mask_round ((A), \ + _mm512_setzero_ps (), \ + (__mmask16)-1, \ + (B))) + +#define _mm512_mask_cvtx_roundph_ps(A, B, C, D) \ + (__builtin_ia32_vcvtph2psx512_mask_round ((C), (A), (B), (D))) + +#define _mm512_maskz_cvtx_roundph_ps(A, B, C) \ + (__builtin_ia32_vcvtph2psx512_mask_round ((B), \ + _mm512_setzero_ps (), \ + (A), \ + (C))) +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtps2ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtxps_ph (__m512 __A) +{ + return __builtin_ia32_vcvtps2phx512_mask_round ((__v16sf) __A, + _mm256_setzero_ph (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtxps_ph (__m256h __A, __mmask16 __B, __m512 __C) +{ + return __builtin_ia32_vcvtps2phx512_mask_round ((__v16sf) __C, + __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtxps_ph (__mmask16 __A, __m512 __B) +{ + return __builtin_ia32_vcvtps2phx512_mask_round ((__v16sf) __B, + _mm256_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtx_roundps_ph (__m512 __A, int __B) +{ + return __builtin_ia32_vcvtps2phx512_mask_round ((__v16sf) __A, + _mm256_setzero_ph (), + (__mmask16) -1, + __B); +} + +extern __inline __m256h +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtx_roundps_ph (__m256h __A, __mmask16 __B, __m512 __C, int __D) +{ + return __builtin_ia32_vcvtps2phx512_mask_round ((__v16sf) __C, + __A, __B, __D); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtx_roundps_ph (__mmask16 __A, __m512 __B, int __C) +{ + return __builtin_ia32_vcvtps2phx512_mask_round ((__v16sf) __B, + _mm256_setzero_ph (), + __A, __C); +} + +#else +#define _mm512_cvtx_roundps_ph(A, B) \ + (__builtin_ia32_vcvtps2phx512_mask_round ((__v16sf)(A), \ + _mm256_setzero_ph (),\ + (__mmask16)-1, (B))) + +#define _mm512_mask_cvtx_roundps_ph(A, B, C, D) \ + (__builtin_ia32_vcvtps2phx512_mask_round ((__v16sf)(C), \ + (A), (B), (D))) + +#define _mm512_maskz_cvtx_roundps_ph(A, B, C) \ + (__builtin_ia32_vcvtps2phx512_mask_round ((__v16sf)(B), \ + _mm256_setzero_ph (),\ + (A), (C))) +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtpd2ph. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvtpd_ph (__m512d __A) +{ + return __builtin_ia32_vcvtpd2ph512_mask_round ((__v8df) __A, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvtpd_ph (__m128h __A, __mmask8 __B, __m512d __C) +{ + return __builtin_ia32_vcvtpd2ph512_mask_round ((__v8df) __C, + __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvtpd_ph (__mmask8 __A, __m512d __B) +{ + return __builtin_ia32_vcvtpd2ph512_mask_round ((__v8df) __B, + _mm_setzero_ph (), + __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_cvt_roundpd_ph (__m512d __A, int __B) +{ + return 
__builtin_ia32_vcvtpd2ph512_mask_round ((__v8df) __A, + _mm_setzero_ph (), + (__mmask8) -1, + __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_cvt_roundpd_ph (__m128h __A, __mmask8 __B, __m512d __C, int __D) +{ + return __builtin_ia32_vcvtpd2ph512_mask_round ((__v8df) __C, + __A, __B, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_cvt_roundpd_ph (__mmask8 __A, __m512d __B, int __C) +{ + return __builtin_ia32_vcvtpd2ph512_mask_round ((__v8df) __B, + _mm_setzero_ph (), + __A, __C); +} + +#else +#define _mm512_cvt_roundpd_ph(A, B) \ + (__builtin_ia32_vcvtpd2ph512_mask_round ((__v8df)(A), \ + _mm_setzero_ph (), \ + (__mmask8)-1, (B))) + +#define _mm512_mask_cvt_roundpd_ph(A, B, C, D) \ + (__builtin_ia32_vcvtpd2ph512_mask_round ((__v8df)(C), \ + (A), (B), (D))) + +#define _mm512_maskz_cvt_roundpd_ph(A, B, C) \ + (__builtin_ia32_vcvtpd2ph512_mask_round ((__v8df)(B), \ + _mm_setzero_ph (), \ + (A), (C))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtsh2ss, vcvtsh2sd. 
*/ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_ss (__m128 __A, __m128h __B) +{ + return __builtin_ia32_vcvtsh2ss_mask_round (__B, __A, + _mm_setzero_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsh_ss (__m128 __A, __mmask8 __B, __m128 __C, + __m128h __D) +{ + return __builtin_ia32_vcvtsh2ss_mask_round (__D, __C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsh_ss (__mmask8 __A, __m128 __B, + __m128h __C) +{ + return __builtin_ia32_vcvtsh2ss_mask_round (__C, __B, + _mm_setzero_ps (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsh_sd (__m128d __A, __m128h __B) +{ + return __builtin_ia32_vcvtsh2sd_mask_round (__B, __A, + _mm_setzero_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsh_sd (__m128d __A, __mmask8 __B, __m128d __C, + __m128h __D) +{ + return __builtin_ia32_vcvtsh2sd_mask_round (__D, __C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsh_sd (__mmask8 __A, __m128d __B, __m128h __C) +{ + return __builtin_ia32_vcvtsh2sd_mask_round (__C, __B, + _mm_setzero_pd (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsh_ss (__m128 __A, __m128h __B, const int __R) +{ + return __builtin_ia32_vcvtsh2ss_mask_round (__B, __A, + _mm_setzero_ps (), + (__mmask8) -1, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_cvt_roundsh_ss (__m128 __A, __mmask8 __B, __m128 __C, + __m128h __D, const int __R) +{ + return __builtin_ia32_vcvtsh2ss_mask_round (__D, __C, __A, __B, __R); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvt_roundsh_ss (__mmask8 __A, __m128 __B, + __m128h __C, const int __R) +{ + return __builtin_ia32_vcvtsh2ss_mask_round (__C, __B, + _mm_setzero_ps (), + __A, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsh_sd (__m128d __A, __m128h __B, const int __R) +{ + return __builtin_ia32_vcvtsh2sd_mask_round (__B, __A, + _mm_setzero_pd (), + (__mmask8) -1, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvt_roundsh_sd (__m128d __A, __mmask8 __B, __m128d __C, + __m128h __D, const int __R) +{ + return __builtin_ia32_vcvtsh2sd_mask_round (__D, __C, __A, __B, __R); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvt_roundsh_sd (__mmask8 __A, __m128d __B, __m128h __C, const int __R) +{ + return __builtin_ia32_vcvtsh2sd_mask_round (__C, __B, + _mm_setzero_pd (), + __A, __R); +} + +#else +#define _mm_cvt_roundsh_ss(A, B, R) \ + (__builtin_ia32_vcvtsh2ss_mask_round ((B), (A), \ + _mm_setzero_ps (), \ + (__mmask8) -1, (R))) + +#define _mm_mask_cvt_roundsh_ss(A, B, C, D, R) \ + (__builtin_ia32_vcvtsh2ss_mask_round ((D), (C), (A), (B), (R))) + +#define _mm_maskz_cvt_roundsh_ss(A, B, C, R) \ + (__builtin_ia32_vcvtsh2ss_mask_round ((C), (B), \ + _mm_setzero_ps (), \ + (A), (R))) + +#define _mm_cvt_roundsh_sd(A, B, R) \ + (__builtin_ia32_vcvtsh2sd_mask_round ((B), (A), \ + _mm_setzero_pd (), \ + (__mmask8) -1, (R))) + +#define _mm_mask_cvt_roundsh_sd(A, B, C, D, R) \ + (__builtin_ia32_vcvtsh2sd_mask_round ((D), (C), (A), (B), (R))) + +#define _mm_maskz_cvt_roundsh_sd(A, B, C, R) \ + 
(__builtin_ia32_vcvtsh2sd_mask_round ((C), (B), \ + _mm_setzero_pd (), \ + (A), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtss2sh, vcvtsd2sh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_sh (__m128h __A, __m128 __B) +{ + return __builtin_ia32_vcvtss2sh_mask_round (__B, __A, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtss_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128 __D) +{ + return __builtin_ia32_vcvtss2sh_mask_round (__D, __C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtss_sh (__mmask8 __A, __m128h __B, __m128 __C) +{ + return __builtin_ia32_vcvtss2sh_mask_round (__C, __B, + _mm_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_sh (__m128h __A, __m128d __B) +{ + return __builtin_ia32_vcvtsd2sh_mask_round (__B, __A, + _mm_setzero_ph (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsd_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128d __D) +{ + return __builtin_ia32_vcvtsd2sh_mask_round (__D, __C, __A, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsd_sh (__mmask8 __A, __m128h __B, __m128d __C) +{ + return __builtin_ia32_vcvtsd2sh_mask_round (__C, __B, + _mm_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundss_sh (__m128h __A, __m128 __B, const int __R) +{ + return __builtin_ia32_vcvtss2sh_mask_round 
(__B, __A, + _mm_setzero_ph (), + (__mmask8) -1, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvt_roundss_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128 __D, + const int __R) +{ + return __builtin_ia32_vcvtss2sh_mask_round (__D, __C, __A, __B, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvt_roundss_sh (__mmask8 __A, __m128h __B, __m128 __C, + const int __R) +{ + return __builtin_ia32_vcvtss2sh_mask_round (__C, __B, + _mm_setzero_ph (), + __A, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_roundsd_sh (__m128h __A, __m128d __B, const int __R) +{ + return __builtin_ia32_vcvtsd2sh_mask_round (__B, __A, + _mm_setzero_ph (), + (__mmask8) -1, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvt_roundsd_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128d __D, + const int __R) +{ + return __builtin_ia32_vcvtsd2sh_mask_round (__D, __C, __A, __B, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvt_roundsd_sh (__mmask8 __A, __m128h __B, __m128d __C, + const int __R) +{ + return __builtin_ia32_vcvtsd2sh_mask_round (__C, __B, + _mm_setzero_ph (), + __A, __R); +} + +#else +#define _mm_cvt_roundss_sh(A, B, R) \ + (__builtin_ia32_vcvtss2sh_mask_round ((B), (A), \ + _mm_setzero_ph (), \ + (__mmask8) -1, R)) + +#define _mm_mask_cvt_roundss_sh(A, B, C, D, R) \ + (__builtin_ia32_vcvtss2sh_mask_round ((D), (C), (A), (B), (R))) + +#define _mm_maskz_cvt_roundss_sh(A, B, C, R) \ + (__builtin_ia32_vcvtss2sh_mask_round ((C), (B), \ + _mm_setzero_ph (), \ + A, R)) + +#define _mm_cvt_roundsd_sh(A, B, R) \ + (__builtin_ia32_vcvtsd2sh_mask_round ((B), (A), \ + _mm_setzero_ph (), \ + (__mmask8) -1, R)) + +#define _mm_mask_cvt_roundsd_sh(A, B, C, D, R) \ + 
(__builtin_ia32_vcvtsd2sh_mask_round ((D), (C), (A), (B), (R))) + +#define _mm_maskz_cvt_roundsd_sh(A, B, C, R) \ + (__builtin_ia32_vcvtsd2sh_mask_round ((C), (B), \ + _mm_setzero_ph (), \ + (A), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmaddsub[132,213,231]ph. */ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmaddsub_ph (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmaddsub_ph (__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmaddsub_ph (__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmaddsub_ph (__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmaddsub_round_ph (__m512h __A, __m512h __B, __m512h __C, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_fmaddsub_round_ph (__m512h __A, __mmask32 __U, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmaddsub_round_ph (__m512h __A, __m512h __B, __m512h __C, + __mmask32 __U, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmaddsub_round_ph (__mmask32 __U, __m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmaddsubph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +#else +#define _mm512_fmaddsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask ((A), (B), (C), -1, (R))) + +#define _mm512_mask_fmaddsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask ((A), (B), (C), (U), (R))) + +#define _mm512_mask3_fmaddsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask3 ((A), (B), (C), (U), (R))) + +#define _mm512_maskz_fmaddsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmsubadd[132,213,231]ph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) + _mm512_fmsubadd_ph (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsubadd_ph (__m512h __A, __mmask32 __U, + __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsubadd_ph (__m512h __A, __m512h __B, + __m512h __C, __mmask32 __U) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsubadd_ph (__mmask32 __U, __m512h __A, + __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsubadd_round_ph (__m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsubadd_round_ph (__m512h __A, __mmask32 __U, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, 
__R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsubadd_round_ph (__m512h __A, __m512h __B, __m512h __C, + __mmask32 __U, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsubadd_round_ph (__mmask32 __U, __m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) + __builtin_ia32_vfmsubaddph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +#else +#define _mm512_fmsubadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmsubaddph512_mask ((A), (B), (C), -1, (R))) + +#define _mm512_mask_fmsubadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmsubaddph512_mask ((A), (B), (C), (U), (R))) + +#define _mm512_mask3_fmsubadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubaddph512_mask3 ((A), (B), (C), (U), (R))) + +#define _mm512_maskz_fmsubadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmsubaddph512_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmadd[132,213,231]ph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) + _mm512_fmadd_ph (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_ph (__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_ph (__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) +{ + return (__m512h) + __builtin_ia32_vfmaddph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_ph (__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_round_ph (__m512h __A, __m512h __B, __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_round_ph (__m512h __A, __mmask32 __U, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_round_ph (__m512h __A, __m512h __B, __m512h __C, + __mmask32 __U, const int __R) +{ + return (__m512h) __builtin_ia32_vfmaddph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_round_ph (__mmask32 __U, __m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfmaddph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +#else +#define _mm512_fmadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask ((A), (B), (C), -1, (R))) + +#define _mm512_mask_fmadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask ((A), (B), (C), (U), (R))) + +#define _mm512_mask3_fmadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask3 ((A), (B), (C), (U), (R))) + +#define _mm512_maskz_fmadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfnmadd[132,213,231]ph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmadd_ph (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfnmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmadd_ph (__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfnmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmadd_ph (__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) +{ + return (__m512h) + __builtin_ia32_vfnmaddph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmadd_ph (__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfnmaddph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmadd_round_ph (__m512h __A, __m512h __B, __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmadd_round_ph (__m512h __A, __mmask32 __U, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmaddph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmadd_round_ph (__m512h __A, __m512h __B, __m512h __C, + __mmask32 __U, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmaddph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmadd_round_ph (__mmask32 __U, __m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmaddph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +#else +#define _mm512_fnmadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfnmaddph512_mask ((A), (B), (C), -1, (R))) + +#define _mm512_mask_fnmadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfnmaddph512_mask ((A), (B), (C), (U), (R))) + +#define _mm512_mask3_fnmadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfnmaddph512_mask3 ((A), (B), (C), (U), (R))) + +#define _mm512_maskz_fnmadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfnmaddph512_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmsub[132,213,231]ph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsub_ph (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsub_ph (__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsub_ph (__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) +{ + return (__m512h) + __builtin_ia32_vfmsubph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsub_ph (__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmsubph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmsub_round_ph (__m512h __A, __m512h __B, __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmsub_round_ph (__m512h __A, __mmask32 __U, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmsub_round_ph (__m512h __A, __m512h __B, __m512h __C, + __mmask32 __U, const int __R) +{ + return (__m512h) __builtin_ia32_vfmsubph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmsub_round_ph (__mmask32 __U, __m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfmsubph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +#else +#define _mm512_fmsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask ((A), (B), (C), -1, (R))) + +#define _mm512_mask_fmsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask ((A), (B), (C), (U), (R))) + +#define _mm512_mask3_fmsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask3 ((A), (B), (C), (U), (R))) + +#define _mm512_maskz_fmsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfnmsub[132,213,231]ph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmsub_ph (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfnmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmsub_ph (__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfnmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmsub_ph (__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) +{ + return (__m512h) + __builtin_ia32_vfnmsubph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmsub_ph (__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfnmsubph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fnmsub_round_ph (__m512h __A, __m512h __B, __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) -1, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fnmsub_round_ph (__m512h __A, __mmask32 __U, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmsubph512_mask ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fnmsub_round_ph (__m512h __A, __m512h __B, __m512h __C, + __mmask32 __U, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmsubph512_mask3 ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fnmsub_round_ph (__mmask32 __U, __m512h __A, __m512h __B, + __m512h __C, const int __R) +{ + return (__m512h) __builtin_ia32_vfnmsubph512_maskz ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + (__mmask32) __U, __R); +} + +#else +#define _mm512_fnmsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfnmsubph512_mask ((A), (B), (C), -1, (R))) + +#define _mm512_mask_fnmsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfnmsubph512_mask ((A), (B), (C), (U), (R))) + +#define _mm512_mask3_fnmsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfnmsubph512_mask3 ((A), (B), (C), (U), (R))) + +#define _mm512_maskz_fnmsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfnmsubph512_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmadd[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask3_fmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fmadd_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (-1), (R))) +#define _mm_mask_fmadd_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), (C), (U), (R))) +#define _mm_mask3_fmadd_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask3 ((A), (B), (C), (U), (R))) +#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfnmadd[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask3_fnmadd_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfnmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fnmadd_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (-1), (R))) +#define _mm_mask_fnmadd_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_mask ((A), (B), (C), (U), (R))) +#define _mm_mask3_fnmadd_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_mask3 ((A), (B), (C), (U), (R))) +#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfnmaddsh3_maskz ((A), (B), (C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfmsub[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask3_fmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + (__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + (__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fmsub_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (-1), (R))) +#define _mm_mask_fmsub_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), (B), -(C), (U), (R))) +#define _mm_mask3_fmsub_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), (B), (C), (U), (R))) +#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), (B), -(C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfnmsub[132,213,231]sh. 
*/ +extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + -(__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_sh (__mmask8 __U, __m128h __W, __m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) -1, + __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_round_sh (__m128h __W, __mmask8 __U, __m128h __A, __m128h __B, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_mask ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask3_fnmsub_round_sh (__m128h __W, __m128h __A, __m128h __B, __mmask8 __U, + const int __R) +{ + return (__m128h) __builtin_ia32_vfmsubsh3_mask3 ((__v8hf) __W, + -(__v8hf) __A, + (__v8hf) __B, + (__mmask8) __U, __R); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_round_sh (__mmask8 __U, __m128h __W, __m128h __A, + __m128h __B, const int __R) +{ + return (__m128h) __builtin_ia32_vfmaddsh3_maskz ((__v8hf) __W, + -(__v8hf) __A, + -(__v8hf) __B, + (__mmask8) __U, __R); +} + +#else +#define _mm_fnmsub_round_sh(A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (-1), (R))) +#define _mm_mask_fnmsub_round_sh(A, U, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_mask ((A), -(B), -(C), (U), (R))) +#define _mm_mask3_fnmsub_round_sh(A, B, C, U, R) \ + ((__m128h) __builtin_ia32_vfmsubsh3_mask3 ((A), -(B), (C), (U), (R))) +#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \ + ((__m128h) __builtin_ia32_vfmaddsh3_maskz ((A), -(B), -(C), (U), (R))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vf[,c]maddcph. 
*/ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fcmadd_pch (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fcmadd_pch (__m512h __A, __mmask16 __B, __m512h __C, __m512h __D) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_mask_round ((__v32hf) __A, + (__v32hf) __C, + (__v32hf) __D, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fcmadd_pch (__m512h __A, __m512h __B, __m512h __C, __mmask16 __D) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_mask3_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + __D, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fcmadd_pch (__mmask16 __A, __m512h __B, __m512h __C, __m512h __D) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_maskz_round ((__v32hf) __B, + (__v32hf) __C, + (__v32hf) __D, + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_pch (__m512h __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_pch (__m512h __A, __mmask16 __B, __m512h __C, __m512h __D) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_mask_round ((__v32hf) __A, + (__v32hf) __C, + (__v32hf) __D, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm512_mask3_fmadd_pch (__m512h __A, __m512h __B, __m512h __C, __mmask16 __D) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_mask3_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + __D, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_pch (__mmask16 __A, __m512h __B, __m512h __C, __m512h __D) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_maskz_round ((__v32hf) __B, + (__v32hf) __C, + (__v32hf) __D, + __A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fcmadd_round_pch (__m512h __A, __m512h __B, __m512h __C, const int __D) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fcmadd_round_pch (__m512h __A, __mmask16 __B, __m512h __C, + __m512h __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_mask_round ((__v32hf) __A, + (__v32hf) __C, + (__v32hf) __D, __B, + __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fcmadd_round_pch (__m512h __A, __m512h __B, __m512h __C, + __mmask16 __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_mask3_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + __D, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fcmadd_round_pch (__mmask16 __A, __m512h __B, __m512h __C, + __m512h __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfcmaddcph512_maskz_round ((__v32hf) __B, + (__v32hf) __C, + (__v32hf) __D, + __A, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmadd_round_pch (__m512h __A, 
__m512h __B, __m512h __C, const int __D) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmadd_round_pch (__m512h __A, __mmask16 __B, __m512h __C, + __m512h __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_mask_round ((__v32hf) __A, + (__v32hf) __C, + (__v32hf) __D, __B, + __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask3_fmadd_round_pch (__m512h __A, __m512h __B, __m512h __C, + __mmask16 __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_mask3_round ((__v32hf) __A, + (__v32hf) __B, + (__v32hf) __C, + __D, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmadd_round_pch (__mmask16 __A, __m512h __B, __m512h __C, + __m512h __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfmaddcph512_maskz_round ((__v32hf) __B, + (__v32hf) __C, + (__v32hf) __D, + __A, __E); +} + +#else +#define _mm512_fcmadd_round_pch(A, B, C, D) \ + (__m512h) __builtin_ia32_vfcmaddcph512_round ((A), (B), (C), (D)) + +#define _mm512_mask_fcmadd_round_pch(A, B, C, D, E) \ + ((__m512h) \ + __builtin_ia32_vfcmaddcph512_mask_round ((__v32hf) (A), \ + (__v32hf) (C), \ + (__v32hf) (D), \ + (B), (E))) + + +#define _mm512_mask3_fcmadd_round_pch(A, B, C, D, E) \ + ((__m512h) \ + __builtin_ia32_vfcmaddcph512_mask3_round ((A), (B), (C), (D), (E))) + +#define _mm512_maskz_fcmadd_round_pch(A, B, C, D, E) \ + (__m512h) \ + __builtin_ia32_vfcmaddcph512_maskz_round ((B), (C), (D), (A), (E)) + +#define _mm512_fmadd_round_pch(A, B, C, D) \ + (__m512h) __builtin_ia32_vfmaddcph512_round ((A), (B), (C), (D)) + +#define _mm512_mask_fmadd_round_pch(A, B, C, D, E) \ + ((__m512h) \ + __builtin_ia32_vfmaddcph512_mask_round ((__v32hf) (A), \ + (__v32hf) (C), \ 
+ (__v32hf) (D), \ + (B), (E))) + +#define _mm512_mask3_fmadd_round_pch(A, B, C, D, E) \ + (__m512h) \ + __builtin_ia32_vfmaddcph512_mask3_round ((A), (B), (C), (D), (E)) + +#define _mm512_maskz_fmadd_round_pch(A, B, C, D, E) \ + (__m512h) \ + __builtin_ia32_vfmaddcph512_maskz_round ((B), (C), (D), (A), (E)) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vf[,c]mulcph. */ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fcmul_pch (__m512h __A, __m512h __B) +{ + return (__m512h) + __builtin_ia32_vfcmulcph512_round ((__v32hf) __A, + (__v32hf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fcmul_pch (__m512h __A, __mmask16 __B, __m512h __C, __m512h __D) +{ + return (__m512h) + __builtin_ia32_vfcmulcph512_mask_round ((__v32hf) __C, + (__v32hf) __D, + (__v32hf) __A, + __B, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fcmul_pch (__mmask16 __A, __m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfcmulcph512_mask_round ((__v32hf) __B, + (__v32hf) __C, + _mm512_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmul_pch (__m512h __A, __m512h __B) +{ + return (__m512h) + __builtin_ia32_vfmulcph512_round ((__v32hf) __A, + (__v32hf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmul_pch (__m512h __A, __mmask16 __B, __m512h __C, __m512h __D) +{ + return (__m512h) + __builtin_ia32_vfmulcph512_mask_round ((__v32hf) __C, + (__v32hf) __D, + (__v32hf) __A, + __B, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmul_pch (__mmask16 __A, 
__m512h __B, __m512h __C) +{ + return (__m512h) + __builtin_ia32_vfmulcph512_mask_round ((__v32hf) __B, + (__v32hf) __C, + _mm512_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fcmul_round_pch (__m512h __A, __m512h __B, const int __D) +{ + return (__m512h) + __builtin_ia32_vfcmulcph512_round ((__v32hf) __A, + (__v32hf) __B, __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fcmul_round_pch (__m512h __A, __mmask16 __B, __m512h __C, + __m512h __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfcmulcph512_mask_round ((__v32hf) __C, + (__v32hf) __D, + (__v32hf) __A, + __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fcmul_round_pch (__mmask16 __A, __m512h __B, + __m512h __C, const int __E) +{ + return (__m512h) + __builtin_ia32_vfcmulcph512_mask_round ((__v32hf) __B, + (__v32hf) __C, + _mm512_setzero_ph (), + __A, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_fmul_round_pch (__m512h __A, __m512h __B, const int __D) +{ + return (__m512h) + __builtin_ia32_vfmulcph512_round ((__v32hf) __A, + (__v32hf) __B, + __D); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_fmul_round_pch (__m512h __A, __mmask16 __B, __m512h __C, + __m512h __D, const int __E) +{ + return (__m512h) + __builtin_ia32_vfmulcph512_mask_round ((__v32hf) __C, + (__v32hf) __D, + (__v32hf) __A, + __B, __E); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_fmul_round_pch (__mmask16 __A, __m512h __B, + __m512h __C, const int __E) +{ + return (__m512h) + __builtin_ia32_vfmulcph512_mask_round ((__v32hf) __B, + (__v32hf) __C, + _mm512_setzero_ph 
(), + __A, __E); +} + +#else +#define _mm512_fcmul_round_pch(A, B, D) \ + (__m512h) __builtin_ia32_vfcmulcph512_round ((A), (B), (D)) + +#define _mm512_mask_fcmul_round_pch(A, B, C, D, E) \ + (__m512h) __builtin_ia32_vfcmulcph512_mask_round ((C), (D), (A), (B), (E)) + +#define _mm512_maskz_fcmul_round_pch(A, B, C, E) \ + (__m512h) __builtin_ia32_vfcmulcph512_mask_round ((B), (C), \ + (__v32hf) \ + _mm512_setzero_ph (), \ + (A), (E)) + +#define _mm512_fmul_round_pch(A, B, D) \ + (__m512h) __builtin_ia32_vfmulcph512_round ((A), (B), (D)) + +#define _mm512_mask_fmul_round_pch(A, B, C, D, E) \ + (__m512h) __builtin_ia32_vfmulcph512_mask_round ((C), (D), (A), (B), (E)) + +#define _mm512_maskz_fmul_round_pch(A, B, C, E) \ + (__m512h) __builtin_ia32_vfmulcph512_mask_round ((B), (C), \ + (__v32hf) \ + _mm512_setzero_ph (), \ + (A), (E)) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vf[,c]maddcsh. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fcmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A, + (__v8hf) __C, + (__v8hf) __D, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fcmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, __D, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fcmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B, + (__v8hf) __C, + (__v8hf) __D, + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fcmadd_sch (__m128h __A, __m128h __B, 
__m128h __C) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A, + (__v8hf) __C, + (__v8hf) __D, __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_sch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, __D, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_sch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B, + (__v8hf) __C, + (__v8hf) __D, + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_sch (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fcmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) __A, + (__v8hf) __C, + (__v8hf) __D, + __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) __A, + 
(__v8hf) __B, + (__v8hf) __C, + __D, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fcmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C, + __m128h __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_maskz_round ((__v8hf) __B, + (__v8hf) __C, + (__v8hf) __D, + __A, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fcmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D) +{ + return (__m128h) + __builtin_ia32_vfcmaddcsh_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_round_sch (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) __A, + (__v8hf) __C, + (__v8hf) __D, + __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + __D, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_round_sch (__mmask8 __A, __m128h __B, __m128h __C, + __m128h __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_maskz_round ((__v8hf) __B, + (__v8hf) __C, + (__v8hf) __D, + __A, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_round_sch (__m128h __A, __m128h __B, __m128h __C, const int __D) +{ + return (__m128h) + __builtin_ia32_vfmaddcsh_round ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + __D); +} +#else +#define _mm_mask_fcmadd_round_sch(A, B, C, D, E) \ + ((__m128h) \ + 
__builtin_ia32_vfcmaddcsh_mask_round ((__v8hf) (A), \ + (__v8hf) (C), \ + (__v8hf) (D), \ + (B), (E))) + + +#define _mm_mask3_fcmadd_round_sch(A, B, C, D, E) \ + ((__m128h) \ + __builtin_ia32_vfcmaddcsh_mask3_round ((__v8hf) (A), \ + (__v8hf) (B), \ + (__v8hf) (C), \ + (D), (E))) + +#define _mm_maskz_fcmadd_round_sch(A, B, C, D, E) \ + __builtin_ia32_vfcmaddcsh_maskz_round ((B), (C), (D), (A), (E)) + +#define _mm_fcmadd_round_sch(A, B, C, D) \ + __builtin_ia32_vfcmaddcsh_round ((A), (B), (C), (D)) + +#define _mm_mask_fmadd_round_sch(A, B, C, D, E) \ + ((__m128h) \ + __builtin_ia32_vfmaddcsh_mask_round ((__v8hf) (A), \ + (__v8hf) (C), \ + (__v8hf) (D), \ + (B), (E))) + +#define _mm_mask3_fmadd_round_sch(A, B, C, D, E) \ + ((__m128h) \ + __builtin_ia32_vfmaddcsh_mask3_round ((__v8hf) (A), \ + (__v8hf) (B), \ + (__v8hf) (C), \ + (D), (E))) + +#define _mm_maskz_fmadd_round_sch(A, B, C, D, E) \ + __builtin_ia32_vfmaddcsh_maskz_round ((B), (C), (D), (A), (E)) + +#define _mm_fmadd_round_sch(A, B, C, D) \ + __builtin_ia32_vfmaddcsh_round ((A), (B), (C), (D)) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vf[,c]mulcsh. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fcmul_sch (__m128h __A, __m128h __B) +{ + return (__m128h) + __builtin_ia32_vfcmulcsh_round ((__v8hf) __A, + (__v8hf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fcmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C, + (__v8hf) __D, + (__v8hf) __A, + __B, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fcmul_sch (__mmask8 __A, __m128h __B, __m128h __C) +{ + return (__m128h) + __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B, + (__v8hf) __C, + _mm_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmul_sch (__m128h __A, __m128h __B) +{ + return (__m128h) + __builtin_ia32_vfmulcsh_round ((__v8hf) __A, + (__v8hf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmul_sch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C, + (__v8hf) __D, + (__v8hf) __A, + __B, _MM_FROUND_CUR_DIRECTION); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmul_sch (__mmask8 __A, __m128h __B, __m128h __C) +{ + return (__m128h) + __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B, + (__v8hf) __C, + _mm_setzero_ph (), + __A, _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fcmul_round_sch (__m128h __A, __m128h __B, const int __D) +{ + return (__m128h) + __builtin_ia32_vfcmulcsh_round ((__v8hf) __A, 
+ (__v8hf) __B, + __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fcmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C, + (__v8hf) __D, + (__v8hf) __A, + __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fcmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, + const int __E) +{ + return (__m128h) + __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B, + (__v8hf) __C, + _mm_setzero_ph (), + __A, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmul_round_sch (__m128h __A, __m128h __B, const int __D) +{ + return (__m128h) + __builtin_ia32_vfmulcsh_round ((__v8hf) __A, + (__v8hf) __B, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmul_round_sch (__m128h __A, __mmask8 __B, __m128h __C, + __m128h __D, const int __E) +{ + return (__m128h) + __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C, + (__v8hf) __D, + (__v8hf) __A, + __B, __E); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmul_round_sch (__mmask8 __A, __m128h __B, __m128h __C, const int __E) +{ + return (__m128h) + __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B, + (__v8hf) __C, + _mm_setzero_ph (), + __A, __E); +} + +#else +#define _mm_fcmul_round_sch(__A, __B, __D) \ + (__m128h) __builtin_ia32_vfcmulcsh_round ((__v8hf) __A, \ + (__v8hf) __B, __D) + +#define _mm_mask_fcmul_round_sch(__A, __B, __C, __D, __E) \ + (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __C, \ + (__v8hf) __D, \ + (__v8hf) __A, \ + __B, __E) + +#define _mm_maskz_fcmul_round_sch(__A, __B, __C, __E) \ + (__m128h) __builtin_ia32_vfcmulcsh_mask_round ((__v8hf) __B, \ + (__v8hf) __C, \ + _mm_setzero_ph (), \ 
+ __A, __E) + +#define _mm_fmul_round_sch(__A, __B, __D) \ + (__m128h) __builtin_ia32_vfmulcsh_round ((__v8hf) __A, \ + (__v8hf) __B, __D) + +#define _mm_mask_fmul_round_sch(__A, __B, __C, __D, __E) \ + (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __C, \ + (__v8hf) __D, \ + (__v8hf) __A, \ + __B, __E) + +#define _mm_maskz_fmul_round_sch(__A, __B, __C, __E) \ + (__m128h) __builtin_ia32_vfmulcsh_mask_round ((__v8hf) __B, \ + (__v8hf) __C, \ + _mm_setzero_ph (), \ + __A, __E) + +#endif /* __OPTIMIZE__ */ + +#define _MM512_REDUCE_OP(op) \ + __m256h __T1 = (__m256h) _mm512_extractf64x4_pd ((__m512d) __A, 0); \ + __m256h __T2 = (__m256h) _mm512_extractf64x4_pd ((__m512d) __A, 1); \ + __m256h __T3 = (__T1 op __T2); \ + __m128h __T4 = (__m128h) _mm256_extractf128_pd ((__m256d) __T3, 0); \ + __m128h __T5 = (__m128h) _mm256_extractf128_pd ((__m256d) __T3, 1); \ + __m128h __T6 = (__T4 op __T5); \ + __m128h __T7 = (__m128h) __builtin_shuffle ((__m128h)__T6, \ + (__v8hi) { 4, 5, 6, 7, 0, 1, 2, 3 }); \ + __m128h __T8 = (__T6 op __T7); \ + __m128h __T9 = (__m128h) __builtin_shuffle ((__m128h)__T8, \ + (__v8hi) { 2, 3, 0, 1, 4, 5, 6, 7 }); \ + __m128h __T10 = __T8 op __T9; \ + return __T10[0] op __T10[1] + +// TODO reduce +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_add_ph (__m512h __A) +{ + _MM512_REDUCE_OP (+); +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_mul_ph (__m512h __A) +{ + _MM512_REDUCE_OP (*); +} + +#undef _MM512_REDUCE_OP + +#ifdef __AVX512VL__ + +#define _MM512_REDUCE_OP(op) \ + __m256h __T1 = (__m256h) _mm512_extractf64x4_pd ((__m512d) __A, 0); \ + __m256h __T2 = (__m256h) _mm512_extractf64x4_pd ((__m512d) __A, 1); \ + __m256h __T3 = __builtin_ia32_##op##ph256_mask (__T1, __T2, \ + _mm256_setzero_ph (), (__mmask16) -1); \ + __m128h __T4 = (__m128h) _mm256_extractf128_pd ((__m256d) __T3, 0); \ + __m128h __T5 = (__m128h) 
_mm256_extractf128_pd ((__m256d) __T3, 1); \ + __m128h __T6 = __builtin_ia32_##op##ph128_mask \ + (__T4, __T5, _mm_setzero_ph (),(__mmask8) -1); \ + __m128h __T7 = (__m128h) __builtin_shuffle ((__m128h)__T6, \ + (__v8hi) { 2, 3, 0, 1, 6, 7, 4, 5 }); \ + __m128h __T8 = (__m128h) __builtin_ia32_##op##ph128_mask \ + (__T6, __T7, _mm_setzero_ph (),(__mmask8) -1); \ + __m128h __T9 = (__m128h) __builtin_shuffle ((__m128h)__T8, \ + (__v8hi) { 4, 5 }); \ + __m128h __T10 = __builtin_ia32_##op##ph128_mask \ + (__T8, __T9, _mm_setzero_ph (),(__mmask8) -1); \ + __m128h __T11 = (__m128h) __builtin_shuffle (__T10, \ + (__v8hi) { 1, 0 }); \ + __m128h __T12 = __builtin_ia32_##op##ph128_mask \ + (__T10, __T11, _mm_setzero_ph (),(__mmask8) -1); \ + return __T12[0] + +#else + +#define _MM512_REDUCE_OP(op) \ + __m512h __T1 = (__m512h) __builtin_shuffle ((__m512d) __A, \ + (__v8di) { 4, 5, 6, 7, 0, 0, 0, 0 }); \ + __m512h __T2 = _mm512_##op##_ph (__A, __T1); \ + __m512h __T3 = (__m512h) __builtin_shuffle ((__m512d) __T2, \ + (__v8di) { 2, 3, 0, 0, 0, 0, 0, 0 }); \ + __m512h __T4 = _mm512_##op##_ph (__T2, __T3); \ + __m512h __T5 = (__m512h) __builtin_shuffle ((__m512d) __T4, \ + (__v8di) { 1, 0, 0, 0, 0, 0, 0, 0 }); \ + __m512h __T6 = _mm512_##op##_ph (__T4, __T5); \ + __m512h __T7 = (__m512h) __builtin_shuffle ((__m512) __T6, \ + (__v16si) { 1, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0 }); \ + __m512h __T8 = _mm512_##op##_ph (__T6, __T7); \ + __m512h __T9 = (__m512h) __builtin_shuffle (__T8, \ + (__v32hi) { 1, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0, \ + 0, 0, 0, 0, 0, 0, 0, 0 }); \ + __m512h __T10 = _mm512_##op##_ph (__T8, __T9); \ + return __T10[0] +#endif + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_min_ph (__m512h __A) +{ + _MM512_REDUCE_OP (min); +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_reduce_max_ph 
(__m512h __A) +{ + _MM512_REDUCE_OP (max); +} + +#undef _MM512_REDUCE_OP + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_blend_ph (__mmask32 __U, __m512h __A, __m512h __W) +{ + return (__m512h) __builtin_ia32_movdquhi512_mask ((__v32hi) __W, + (__v32hi) __A, + (__mmask32) __U); + +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_ph (__m512h __A, __m512i __I, __m512h __B) +{ + return (__m512h) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A, + (__v32hi) __I, + (__v32hi) __B, + (__mmask32)-1); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_ph (__m512i __A, __m512h __B) +{ + return (__m512h) __builtin_ia32_permvarhi512_mask ((__v32hi) __B, + (__v32hi) __A, + (__v32hi) + (_mm512_setzero_ph ()), + (__mmask32)-1); +} + +extern __inline __m512h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_set1_pch (_Float16 _Complex __A) +{ + union + { + _Float16 _Complex a; + float b; + } u = { .a = __A}; + + return (__m512h) _mm512_set1_ps (u.b); +} + +// intrinsics below are alias for f*mul_*ch +#define _mm512_mul_pch(A, B) _mm512_fmul_pch ((A), (B)) +#define _mm512_mask_mul_pch(W, U, A, B) \ + _mm512_mask_fmul_pch ((W), (U), (A), (B)) +#define _mm512_maskz_mul_pch(U, A, B) _mm512_maskz_fmul_pch ((U), (A), (B)) +#define _mm512_mul_round_pch(A, B, R) _mm512_fmul_round_pch ((A), (B), (R)) +#define _mm512_mask_mul_round_pch(W, U, A, B, R) \ + _mm512_mask_fmul_round_pch ((W), (U), (A), (B), (R)) +#define _mm512_maskz_mul_round_pch(U, A, B, R) \ + _mm512_maskz_fmul_round_pch ((U), (A), (B), (R)) + +#define _mm512_cmul_pch(A, B) _mm512_fcmul_pch ((A), (B)) +#define _mm512_mask_cmul_pch(W, U, A, B) \ + _mm512_mask_fcmul_pch ((W), (U), (A), (B)) +#define _mm512_maskz_cmul_pch(U, A, B) _mm512_maskz_fcmul_pch ((U), (A), (B)) +#define _mm512_cmul_round_pch(A, B, 
R) _mm512_fcmul_round_pch ((A), (B), (R)) +#define _mm512_mask_cmul_round_pch(W, U, A, B, R) \ + _mm512_mask_fcmul_round_pch ((W), (U), (A), (B), (R)) +#define _mm512_maskz_cmul_round_pch(U, A, B, R) \ + _mm512_maskz_fcmul_round_pch ((U), (A), (B), (R)) + +#define _mm_mul_sch(A, B) _mm_fmul_sch ((A), (B)) +#define _mm_mask_mul_sch(W, U, A, B) _mm_mask_fmul_sch ((W), (U), (A), (B)) +#define _mm_maskz_mul_sch(U, A, B) _mm_maskz_fmul_sch ((U), (A), (B)) +#define _mm_mul_round_sch(A, B, R) _mm_fmul_round_sch ((A), (B), (R)) +#define _mm_mask_mul_round_sch(W, U, A, B, R) \ + _mm_mask_fmul_round_sch ((W), (U), (A), (B), (R)) +#define _mm_maskz_mul_round_sch(U, A, B, R) \ + _mm_maskz_fmul_round_sch ((U), (A), (B), (R)) + +#define _mm_cmul_sch(A, B) _mm_fcmul_sch ((A), (B)) +#define _mm_mask_cmul_sch(W, U, A, B) _mm_mask_fcmul_sch ((W), (U), (A), (B)) +#define _mm_maskz_cmul_sch(U, A, B) _mm_maskz_fcmul_sch ((U), (A), (B)) +#define _mm_cmul_round_sch(A, B, R) _mm_fcmul_round_sch ((A), (B), (R)) +#define _mm_mask_cmul_round_sch(W, U, A, B, R) \ + _mm_mask_fcmul_round_sch ((W), (U), (A), (B), (R)) +#define _mm_maskz_cmul_round_sch(U, A, B, R) \ + _mm_maskz_fcmul_round_sch ((U), (A), (B), (R)) + +#ifdef __DISABLE_AVX512FP16__ +#undef __DISABLE_AVX512FP16__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512FP16__ */ + +#endif /* __AVX512FP16INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fp16vlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fp16vlintrin.h new file mode 100644 index 0000000..0794498 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512fp16vlintrin.h @@ -0,0 +1,3362 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512FP16VLINTRIN_H_INCLUDED +#define __AVX512FP16VLINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512FP16__) +#pragma GCC push_options +#pragma GCC target("avx512fp16,avx512vl") +#define __DISABLE_AVX512FP16VL__ +#endif /* __AVX512FP16VL__ */ + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castph_ps (__m128h __a) +{ + return (__m128) __a; +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castph_ps (__m256h __a) +{ + return (__m256) __a; +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castph_pd (__m128h __a) +{ + return (__m128d) __a; +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castph_pd (__m256h __a) +{ + return (__m256d) __a; +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castph_si128 (__m128h __a) +{ + return (__m128i) __a; +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castph_si256 (__m256h __a) +{ + return (__m256i) __a; +} + +extern __inline __m128h +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castps_ph (__m128 __a) +{ + return (__m128h) __a; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps_ph (__m256 __a) +{ + return (__m256h) __a; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castpd_ph (__m128d __a) +{ + return (__m128h) __a; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd_ph (__m256d __a) +{ + return (__m256h) __a; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castsi128_ph (__m128i __a) +{ + return (__m128h) __a; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_ph (__m256i __a) +{ + return (__m256h) __a; +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castph256_ph128 (__m256h __A) +{ + union + { + __m128h a[2]; + __m256h v; + } u = { .v = __A }; + return u.a[0]; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castph128_ph256 (__m128h __A) +{ + union + { + __m128h a[2]; + __m256h v; + } u; + u.a[0] = __A; + return u.v; +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zextph128_ph256 (__m128h __A) +{ + return (__m256h) _mm256_insertf128_ps (_mm256_setzero_ps (), + (__m128) __A, 0); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_conj_pch (__m256h __A) +{ + return (__m256h) _mm256_xor_epi32 ((__m256i) __A, _mm256_set1_epi32 (1<<31)); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_conj_pch (__m256h __W, __mmask8 __U, __m256h __A) +{ + return (__m256h) __builtin_ia32_movaps256_mask ((__v8sf) + 
_mm256_conj_pch (__A), + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_conj_pch (__mmask8 __U, __m256h __A) +{ + return (__m256h) __builtin_ia32_movaps256_mask ((__v8sf) + _mm256_conj_pch (__A), + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_conj_pch (__m128h __A) +{ + return (__m128h) _mm_xor_epi32 ((__m128i) __A, _mm_set1_epi32 (1<<31)); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_conj_pch (__m128h __W, __mmask8 __U, __m128h __A) +{ + return (__m128h) __builtin_ia32_movaps128_mask ((__v4sf) _mm_conj_pch (__A), + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_conj_pch (__mmask8 __U, __m128h __A) +{ + return (__m128h) __builtin_ia32_movaps128_mask ((__v4sf) _mm_conj_pch (__A), + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +/* Intrinsics v[add,sub,mul,div]ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ph (__m128h __A, __m128h __B) +{ + return (__m128h) ((__v8hf) __A + (__v8hf) __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_ph (__m256h __A, __m256h __B) +{ + return (__m256h) ((__v16hf) __A + (__v16hf) __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_addph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_add_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D) +{ + return __builtin_ia32_addph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_addph128_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_add_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_addph256_mask (__B, __C, + _mm256_setzero_ph (), __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ph (__m128h __A, __m128h __B) +{ + return (__m128h) ((__v8hf) __A - (__v8hf) __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_ph (__m256h __A, __m256h __B) +{ + return (__m256h) ((__v16hf) __A - (__v16hf) __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_subph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sub_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D) +{ + return __builtin_ia32_subph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_subph128_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_subph256_mask (__B, __C, + _mm256_setzero_ph (), __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ph (__m128h __A, __m128h __B) +{ + return (__m128h) ((__v8hf) __A * (__v8hf) __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_ph (__m256h __A, __m256h __B) +{ + return (__m256h) ((__v16hf) __A * (__v16hf) __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_mulph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mul_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D) +{ + return __builtin_ia32_mulph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_mulph128_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mul_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_mulph256_mask (__B, __C, + 
_mm256_setzero_ph (), __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ph (__m128h __A, __m128h __B) +{ + return (__m128h) ((__v8hf) __A / (__v8hf) __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_div_ph (__m256h __A, __m256h __B) +{ + return (__m256h) ((__v16hf) __A / (__v16hf) __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_divph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_div_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D) +{ + return __builtin_ia32_divph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_divph128_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_div_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_divph256_mask (__B, __C, + _mm256_setzero_ph (), __A); +} + +/* Intrinsics v[max,min]ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ph (__m128h __A, __m128h __B) +{ + return __builtin_ia32_maxph128_mask (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_ph (__m256h __A, __m256h __B) +{ + return __builtin_ia32_maxph256_mask (__A, __B, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_maxph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D) +{ + return __builtin_ia32_maxph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_maxph128_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_maxph256_mask (__B, __C, + _mm256_setzero_ph (), __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ph (__m128h __A, __m128h __B) +{ + return __builtin_ia32_minph128_mask (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_ph (__m256h __A, __m256h __B) +{ + return __builtin_ia32_minph256_mask (__A, __B, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_ph 
(__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_minph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_ph (__m256h __A, __mmask16 __B, __m256h __C, __m256h __D) +{ + return __builtin_ia32_minph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_minph128_mask (__B, __C, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_minph256_mask (__B, __C, + _mm256_setzero_ph (), __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_ph (__m128h __A) +{ + return (__m128h) _mm_and_si128 ( _mm_set1_epi32 (0x7FFF7FFF), + (__m128i) __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_abs_ph (__m256h __A) +{ + return (__m256h) _mm256_and_si256 ( _mm256_set1_epi32 (0x7FFF7FFF), + (__m256i) __A); +} + +/* vcmpph */ +#ifdef __OPTIMIZE +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ph_mask (__m128h __A, __m128h __B, const int __C) +{ + return (__mmask8) __builtin_ia32_cmpph128_mask (__A, __B, __C, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_ph_mask (__mmask8 __A, __m128h __B, __m128h __C, + const int __D) +{ + return (__mmask8) __builtin_ia32_cmpph128_mask (__B, __C, __D, __A); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ph_mask (__m256h __A, __m256h __B, const int __C) +{ + return (__mmask16) 
__builtin_ia32_cmpph256_mask (__A, __B, __C, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_ph_mask (__mmask16 __A, __m256h __B, __m256h __C, + const int __D) +{ + return (__mmask16) __builtin_ia32_cmpph256_mask (__B, __C, __D, + __A); +} + +#else +#define _mm_cmp_ph_mask(A, B, C) \ + (__builtin_ia32_cmpph128_mask ((A), (B), (C), (-1))) + +#define _mm_mask_cmp_ph_mask(A, B, C, D) \ + (__builtin_ia32_cmpph128_mask ((B), (C), (D), (A))) + +#define _mm256_cmp_ph_mask(A, B, C) \ + (__builtin_ia32_cmpph256_mask ((A), (B), (C), (-1))) + +#define _mm256_mask_cmp_ph_mask(A, B, C, D) \ + (__builtin_ia32_cmpph256_mask ((B), (C), (D), (A))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vsqrtph. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ph (__m128h __A) +{ + return __builtin_ia32_sqrtph128_mask (__A, _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sqrt_ph (__m256h __A) +{ + return __builtin_ia32_sqrtph256_mask (__A, _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_ph (__m128h __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_sqrtph128_mask (__C, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sqrt_ph (__m256h __A, __mmask16 __B, __m256h __C) +{ + return __builtin_ia32_sqrtph256_mask (__C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_ph (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_sqrtph128_mask (__B, _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sqrt_ph (__mmask16 
__A, __m256h __B) +{ + return __builtin_ia32_sqrtph256_mask (__B, _mm256_setzero_ph (), + __A); +} + +/* Intrinsics vrsqrtph. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ph (__m128h __A) +{ + return __builtin_ia32_rsqrtph128_mask (__A, _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rsqrt_ph (__m256h __A) +{ + return __builtin_ia32_rsqrtph256_mask (__A, _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt_ph (__m128h __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_rsqrtph128_mask (__C, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rsqrt_ph (__m256h __A, __mmask16 __B, __m256h __C) +{ + return __builtin_ia32_rsqrtph256_mask (__C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt_ph (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_rsqrtph128_mask (__B, _mm_setzero_ph (), __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rsqrt_ph (__mmask16 __A, __m256h __B) +{ + return __builtin_ia32_rsqrtph256_mask (__B, _mm256_setzero_ph (), + __A); +} + +/* Intrinsics vrcpph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ph (__m128h __A) +{ + return __builtin_ia32_rcpph128_mask (__A, _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rcp_ph (__m256h __A) +{ + return __builtin_ia32_rcpph256_mask (__A, _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp_ph (__m128h __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_rcpph128_mask (__C, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rcp_ph (__m256h __A, __mmask16 __B, __m256h __C) +{ + return __builtin_ia32_rcpph256_mask (__C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp_ph (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_rcpph128_mask (__B, _mm_setzero_ph (), __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rcp_ph (__mmask16 __A, __m256h __B) +{ + return __builtin_ia32_rcpph256_mask (__B, _mm256_setzero_ph (), + __A); +} + +/* Intrinsics vscalefph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_ph (__m128h __A, __m128h __B) +{ + return __builtin_ia32_scalefph128_mask (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_scalef_ph (__m256h __A, __m256h __B) +{ + return __builtin_ia32_scalefph256_mask (__A, __B, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_scalef_ph (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return __builtin_ia32_scalefph128_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_scalef_ph (__m256h __A, __mmask16 __B, __m256h __C, + __m256h __D) +{ + return __builtin_ia32_scalefph256_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_ph (__mmask8 __A, __m128h __B, __m128h __C) +{ + return __builtin_ia32_scalefph128_mask (__B, __C, + _mm_setzero_ph (), __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_scalef_ph (__mmask16 __A, __m256h __B, __m256h __C) +{ + return __builtin_ia32_scalefph256_mask (__B, __C, + _mm256_setzero_ph (), + __A); +} + +/* Intrinsics vreduceph. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_ph (__m128h __A, int __B) +{ + return __builtin_ia32_reduceph128_mask (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_ph (__m128h __A, __mmask8 __B, __m128h __C, int __D) +{ + return __builtin_ia32_reduceph128_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_ph (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_reduceph128_mask (__B, __C, + _mm_setzero_ph (), __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_ph (__m256h __A, int __B) +{ + return __builtin_ia32_reduceph256_mask (__A, __B, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_reduce_ph (__m256h __A, __mmask16 __B, __m256h __C, int __D) +{ + return __builtin_ia32_reduceph256_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_reduce_ph (__mmask16 __A, __m256h __B, int __C) +{ + return __builtin_ia32_reduceph256_mask (__B, __C, + _mm256_setzero_ph (), + __A); +} + +#else +#define _mm_reduce_ph(A, B) \ + (__builtin_ia32_reduceph128_mask ((A), (B), \ + _mm_setzero_ph (), \ + ((__mmask8)-1))) + +#define _mm_mask_reduce_ph(A, B, C, D) \ + (__builtin_ia32_reduceph128_mask ((C), (D), (A), (B))) + +#define _mm_maskz_reduce_ph(A, B, C) \ + (__builtin_ia32_reduceph128_mask ((B), (C), _mm_setzero_ph (), (A))) + +#define _mm256_reduce_ph(A, B) \ + (__builtin_ia32_reduceph256_mask ((A), (B), \ + _mm256_setzero_ph (), \ + ((__mmask16)-1))) + +#define _mm256_mask_reduce_ph(A, B, C, D) \ + 
(__builtin_ia32_reduceph256_mask ((C), (D), (A), (B))) + +#define _mm256_maskz_reduce_ph(A, B, C) \ + (__builtin_ia32_reduceph256_mask ((B), (C), _mm256_setzero_ph (), (A))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vrndscaleph. */ +#ifdef __OPTIMIZE__ + extern __inline __m128h + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) + _mm_roundscale_ph (__m128h __A, int __B) + { + return __builtin_ia32_rndscaleph128_mask (__A, __B, + _mm_setzero_ph (), + (__mmask8) -1); + } + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_ph (__m128h __A, __mmask8 __B, __m128h __C, int __D) +{ + return __builtin_ia32_rndscaleph128_mask (__C, __D, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_ph (__mmask8 __A, __m128h __B, int __C) +{ + return __builtin_ia32_rndscaleph128_mask (__B, __C, + _mm_setzero_ph (), __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_roundscale_ph (__m256h __A, int __B) +{ + return __builtin_ia32_rndscaleph256_mask (__A, __B, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_roundscale_ph (__m256h __A, __mmask16 __B, __m256h __C, + int __D) +{ + return __builtin_ia32_rndscaleph256_mask (__C, __D, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_roundscale_ph (__mmask16 __A, __m256h __B, int __C) +{ + return __builtin_ia32_rndscaleph256_mask (__B, __C, + _mm256_setzero_ph (), + __A); +} + +#else +#define _mm_roundscale_ph(A, B) \ + (__builtin_ia32_rndscaleph128_mask ((A), (B), _mm_setzero_ph (), \ + ((__mmask8)-1))) + +#define _mm_mask_roundscale_ph(A, B, C, D) \ + (__builtin_ia32_rndscaleph128_mask ((C), (D), (A), (B))) + +#define 
_mm_maskz_roundscale_ph(A, B, C) \ + (__builtin_ia32_rndscaleph128_mask ((B), (C), _mm_setzero_ph (), (A))) + +#define _mm256_roundscale_ph(A, B) \ + (__builtin_ia32_rndscaleph256_mask ((A), (B), \ + _mm256_setzero_ph(), \ + ((__mmask16)-1))) + +#define _mm256_mask_roundscale_ph(A, B, C, D) \ + (__builtin_ia32_rndscaleph256_mask ((C), (D), (A), (B))) + +#define _mm256_maskz_roundscale_ph(A, B, C) \ + (__builtin_ia32_rndscaleph256_mask ((B), (C), \ + _mm256_setzero_ph (), (A))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vfpclassph. */ +#ifdef __OPTIMIZE__ +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) + _mm_mask_fpclass_ph_mask (__mmask8 __U, __m128h __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassph128_mask ((__v8hf) __A, + __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fpclass_ph_mask (__m128h __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassph128_mask ((__v8hf) __A, + __imm, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fpclass_ph_mask (__mmask16 __U, __m256h __A, const int __imm) +{ + return (__mmask16) __builtin_ia32_fpclassph256_mask ((__v16hf) __A, + __imm, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fpclass_ph_mask (__m256h __A, const int __imm) +{ + return (__mmask16) __builtin_ia32_fpclassph256_mask ((__v16hf) __A, + __imm, + (__mmask16) -1); +} + +#else +#define _mm_fpclass_ph_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclassph128_mask ((__v8hf) (__m128h) (X), \ + (int) (C),(__mmask8)-1)) + +#define _mm_mask_fpclass_ph_mask(u, X, C) \ + ((__mmask8) __builtin_ia32_fpclassph128_mask ((__v8hf) (__m128h) (X), \ + (int) (C),(__mmask8)(u))) + +#define _mm256_fpclass_ph_mask(X, C) \ + ((__mmask16) __builtin_ia32_fpclassph256_mask ((__v16hf) (__m256h) 
(X), \ + (int) (C),(__mmask16)-1)) + +#define _mm256_mask_fpclass_ph_mask(u, X, C) \ + ((__mmask16) __builtin_ia32_fpclassph256_mask ((__v16hf) (__m256h) (X), \ + (int) (C),(__mmask16)(u))) +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vgetexpph, vgetexpsh. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_getexp_ph (__m256h __A) +{ + return (__m256h) __builtin_ia32_getexpph256_mask ((__v16hf) __A, + (__v16hf) + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_getexp_ph (__m256h __W, __mmask16 __U, __m256h __A) +{ + return (__m256h) __builtin_ia32_getexpph256_mask ((__v16hf) __A, + (__v16hf) __W, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_getexp_ph (__mmask16 __U, __m256h __A) +{ + return (__m256h) __builtin_ia32_getexpph256_mask ((__v16hf) __A, + (__v16hf) + _mm256_setzero_ph (), + (__mmask16) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_ph (__m128h __A) +{ + return (__m128h) __builtin_ia32_getexpph128_mask ((__v8hf) __A, + (__v8hf) + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_ph (__m128h __W, __mmask8 __U, __m128h __A) +{ + return (__m128h) __builtin_ia32_getexpph128_mask ((__v8hf) __A, + (__v8hf) __W, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_ph (__mmask8 __U, __m128h __A) +{ + return (__m128h) __builtin_ia32_getexpph128_mask ((__v8hf) __A, + (__v8hf) + _mm_setzero_ph (), + (__mmask8) __U); +} + + +/* Intrinsics vgetmantph, vgetmantsh. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_getmant_ph (__m256h __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256h) __builtin_ia32_getmantph256_mask ((__v16hf) __A, + (__C << 2) | __B, + (__v16hf) + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_getmant_ph (__m256h __W, __mmask16 __U, __m256h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256h) __builtin_ia32_getmantph256_mask ((__v16hf) __A, + (__C << 2) | __B, + (__v16hf) __W, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_getmant_ph (__mmask16 __U, __m256h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256h) __builtin_ia32_getmantph256_mask ((__v16hf) __A, + (__C << 2) | __B, + (__v16hf) + _mm256_setzero_ph (), + (__mmask16) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_ph (__m128h __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128h) __builtin_ia32_getmantph128_mask ((__v8hf) __A, + (__C << 2) | __B, + (__v8hf) + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_ph (__m128h __W, __mmask8 __U, __m128h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128h) __builtin_ia32_getmantph128_mask ((__v8hf) __A, + (__C << 2) | __B, + (__v8hf) __W, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_ph (__mmask8 __U, __m128h __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128h) 
__builtin_ia32_getmantph128_mask ((__v8hf) __A, + (__C << 2) | __B, + (__v8hf) + _mm_setzero_ph (), + (__mmask8) __U); +} + +#else +#define _mm256_getmant_ph(X, B, C) \ + ((__m256h) __builtin_ia32_getmantph256_mask ((__v16hf)(__m256h) (X), \ + (int)(((C)<<2) | (B)), \ + (__v16hf)(__m256h)_mm256_setzero_ph (), \ + (__mmask16)-1)) + +#define _mm256_mask_getmant_ph(W, U, X, B, C) \ + ((__m256h) __builtin_ia32_getmantph256_mask ((__v16hf)(__m256h) (X), \ + (int)(((C)<<2) | (B)), \ + (__v16hf)(__m256h)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_getmant_ph(U, X, B, C) \ + ((__m256h) __builtin_ia32_getmantph256_mask ((__v16hf)(__m256h) (X), \ + (int)(((C)<<2) | (B)), \ + (__v16hf)(__m256h)_mm256_setzero_ph (), \ + (__mmask16)(U))) + +#define _mm_getmant_ph(X, B, C) \ + ((__m128h) __builtin_ia32_getmantph128_mask ((__v8hf)(__m128h) (X), \ + (int)(((C)<<2) | (B)), \ + (__v8hf)(__m128h)_mm_setzero_ph (), \ + (__mmask8)-1)) + +#define _mm_mask_getmant_ph(W, U, X, B, C) \ + ((__m128h) __builtin_ia32_getmantph128_mask ((__v8hf)(__m128h) (X), \ + (int)(((C)<<2) | (B)), \ + (__v8hf)(__m128h)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_getmant_ph(U, X, B, C) \ + ((__m128h) __builtin_ia32_getmantph128_mask ((__v8hf)(__m128h) (X), \ + (int)(((C)<<2) | (B)), \ + (__v8hf)(__m128h)_mm_setzero_ph (), \ + (__mmask8)(U))) + +#endif /* __OPTIMIZE__ */ + +/* Intrinsics vcvtph2dq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_epi32 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvtph2dq128_mask (__A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_epi32 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvtph2dq128_mask (__C, ( __v4si) __A, __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_epi32 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvtph2dq128_mask (__B, + (__v4si) _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_epi32 (__m128h __A) +{ + return (__m256i) + __builtin_ia32_vcvtph2dq256_mask (__A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_epi32 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return (__m256i) + __builtin_ia32_vcvtph2dq256_mask (__C, ( __v8si) __A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_epi32 (__mmask8 __A, __m128h __B) +{ + return (__m256i) + __builtin_ia32_vcvtph2dq256_mask (__B, + (__v8si) + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvtph2udq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_epu32 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvtph2udq128_mask (__A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_epu32 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvtph2udq128_mask (__C, ( __v4si) __A, __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_epu32 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvtph2udq128_mask (__B, + (__v4si) + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_epu32 (__m128h __A) +{ + return (__m256i) + __builtin_ia32_vcvtph2udq256_mask (__A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_epu32 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return (__m256i) + __builtin_ia32_vcvtph2udq256_mask (__C, ( __v8si) __A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_epu32 (__mmask8 __A, __m128h __B) +{ + return (__m256i) + __builtin_ia32_vcvtph2udq256_mask (__B, + (__v8si) _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvttph2dq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttph_epi32 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvttph2dq128_mask (__A, + (__v4si) _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttph_epi32 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i)__builtin_ia32_vcvttph2dq128_mask (__C, + ( __v4si) __A, + __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttph_epi32 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvttph2dq128_mask (__B, + (__v4si) _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttph_epi32 (__m128h __A) +{ + return (__m256i) + __builtin_ia32_vcvttph2dq256_mask (__A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttph_epi32 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return (__m256i) + __builtin_ia32_vcvttph2dq256_mask (__C, + ( __v8si) __A, + __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttph_epi32 (__mmask8 __A, __m128h __B) +{ + return (__m256i) + __builtin_ia32_vcvttph2dq256_mask (__B, + (__v8si) + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvttph2udq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttph_epu32 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvttph2udq128_mask (__A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttph_epu32 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvttph2udq128_mask (__C, + ( __v4si) __A, + __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttph_epu32 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvttph2udq128_mask (__B, + (__v4si) + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttph_epu32 (__m128h __A) +{ + return (__m256i) + __builtin_ia32_vcvttph2udq256_mask (__A, + (__v8si) + _mm256_setzero_si256 (), (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttph_epu32 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return (__m256i) + __builtin_ia32_vcvttph2udq256_mask (__C, + ( __v8si) __A, + __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttph_epu32 (__mmask8 __A, __m128h __B) +{ + return (__m256i) + __builtin_ia32_vcvttph2udq256_mask (__B, + (__v8si) + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvtdq2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_ph (__m128i __A) +{ + return __builtin_ia32_vcvtdq2ph128_mask ((__v4si) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_ph (__m128h __A, __mmask8 __B, __m128i __C) +{ + return __builtin_ia32_vcvtdq2ph128_mask ((__v4si) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi32_ph (__mmask8 __A, __m128i __B) +{ + return __builtin_ia32_vcvtdq2ph128_mask ((__v4si) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_ph (__m256i __A) +{ + return __builtin_ia32_vcvtdq2ph256_mask ((__v8si) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_ph (__m128h __A, __mmask8 __B, __m256i __C) +{ + return __builtin_ia32_vcvtdq2ph256_mask ((__v8si) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi32_ph (__mmask8 __A, __m256i __B) +{ + return __builtin_ia32_vcvtdq2ph256_mask ((__v8si) __B, + _mm_setzero_ph (), + __A); +} + +/* Intrinsics vcvtudq2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu32_ph (__m128i __A) +{ + return __builtin_ia32_vcvtudq2ph128_mask ((__v4si) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu32_ph (__m128h __A, __mmask8 __B, __m128i __C) +{ + return __builtin_ia32_vcvtudq2ph128_mask ((__v4si) __C, + __A, + __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu32_ph (__mmask8 __A, __m128i __B) +{ + return __builtin_ia32_vcvtudq2ph128_mask ((__v4si) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu32_ph (__m256i __A) +{ + return __builtin_ia32_vcvtudq2ph256_mask ((__v8si) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu32_ph (__m128h __A, __mmask8 __B, __m256i __C) +{ + return __builtin_ia32_vcvtudq2ph256_mask ((__v8si) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu32_ph (__mmask8 __A, __m256i __B) +{ + return __builtin_ia32_vcvtudq2ph256_mask ((__v8si) __B, + _mm_setzero_ph (), + __A); +} + +/* Intrinsics vcvtph2qq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_epi64 (__m128h __A) +{ + return + __builtin_ia32_vcvtph2qq128_mask (__A, + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_epi64 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2qq128_mask (__C, __A, __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_epi64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2qq128_mask (__B, + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_epi64 (__m128h __A) +{ + return __builtin_ia32_vcvtph2qq256_mask (__A, + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_epi64 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2qq256_mask (__C, __A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_epi64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2qq256_mask (__B, + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvtph2uqq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_epu64 (__m128h __A) +{ + return __builtin_ia32_vcvtph2uqq128_mask (__A, + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_epu64 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2uqq128_mask (__C, __A, __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_epu64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2uqq128_mask (__B, + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_epu64 (__m128h __A) +{ + return __builtin_ia32_vcvtph2uqq256_mask (__A, + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_epu64 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2uqq256_mask (__C, __A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_epu64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2uqq256_mask (__B, + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvttph2qq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttph_epi64 (__m128h __A) +{ + return __builtin_ia32_vcvttph2qq128_mask (__A, + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttph_epi64 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvttph2qq128_mask (__C, + __A, + __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttph_epi64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvttph2qq128_mask (__B, + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttph_epi64 (__m128h __A) +{ + return __builtin_ia32_vcvttph2qq256_mask (__A, + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttph_epi64 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvttph2qq256_mask (__C, + __A, + __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttph_epi64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvttph2qq256_mask (__B, + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvttph2uqq. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttph_epu64 (__m128h __A) +{ + return __builtin_ia32_vcvttph2uqq128_mask (__A, + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttph_epu64 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvttph2uqq128_mask (__C, + __A, + __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttph_epu64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvttph2uqq128_mask (__B, + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttph_epu64 (__m128h __A) +{ + return __builtin_ia32_vcvttph2uqq256_mask (__A, + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttph_epu64 (__m256i __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvttph2uqq256_mask (__C, + __A, + __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttph_epu64 (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvttph2uqq256_mask (__B, + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvtqq2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi64_ph (__m128i __A) +{ + return __builtin_ia32_vcvtqq2ph128_mask ((__v2di) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_ph (__m128h __A, __mmask8 __B, __m128i __C) +{ + return __builtin_ia32_vcvtqq2ph128_mask ((__v2di) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi64_ph (__mmask8 __A, __m128i __B) +{ + return __builtin_ia32_vcvtqq2ph128_mask ((__v2di) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi64_ph (__m256i __A) +{ + return __builtin_ia32_vcvtqq2ph256_mask ((__v4di) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_ph (__m128h __A, __mmask8 __B, __m256i __C) +{ + return __builtin_ia32_vcvtqq2ph256_mask ((__v4di) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi64_ph (__mmask8 __A, __m256i __B) +{ + return __builtin_ia32_vcvtqq2ph256_mask ((__v4di) __B, + _mm_setzero_ph (), + __A); +} + +/* Intrinsics vcvtuqq2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu64_ph (__m128i __A) +{ + return __builtin_ia32_vcvtuqq2ph128_mask ((__v2di) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu64_ph (__m128h __A, __mmask8 __B, __m128i __C) +{ + return __builtin_ia32_vcvtuqq2ph128_mask ((__v2di) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu64_ph (__mmask8 __A, __m128i __B) +{ + return __builtin_ia32_vcvtuqq2ph128_mask ((__v2di) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu64_ph (__m256i __A) +{ + return __builtin_ia32_vcvtuqq2ph256_mask ((__v4di) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu64_ph (__m128h __A, __mmask8 __B, __m256i __C) +{ + return __builtin_ia32_vcvtuqq2ph256_mask ((__v4di) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu64_ph (__mmask8 __A, __m256i __B) +{ + return __builtin_ia32_vcvtuqq2ph256_mask ((__v4di) __B, + _mm_setzero_ph (), + __A); +} + +/* Intrinsics vcvtph2w. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_epi16 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvtph2w128_mask (__A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_epi16 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvtph2w128_mask (__C, ( __v8hi) __A, __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_epi16 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvtph2w128_mask (__B, + (__v8hi) + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_epi16 (__m256h __A) +{ + return (__m256i) + __builtin_ia32_vcvtph2w256_mask (__A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_epi16 (__m256i __A, __mmask16 __B, __m256h __C) +{ + return (__m256i) + __builtin_ia32_vcvtph2w256_mask (__C, ( __v16hi) __A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_epi16 (__mmask16 __A, __m256h __B) +{ + return (__m256i) + __builtin_ia32_vcvtph2w256_mask (__B, + (__v16hi) + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvtph2uw. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_epu16 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvtph2uw128_mask (__A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_epu16 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvtph2uw128_mask (__C, ( __v8hi) __A, __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_epu16 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvtph2uw128_mask (__B, + (__v8hi) + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_epu16 (__m256h __A) +{ + return (__m256i) + __builtin_ia32_vcvtph2uw256_mask (__A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_epu16 (__m256i __A, __mmask16 __B, __m256h __C) +{ + return (__m256i) + __builtin_ia32_vcvtph2uw256_mask (__C, ( __v16hi) __A, __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_epu16 (__mmask16 __A, __m256h __B) +{ + return (__m256i) + __builtin_ia32_vcvtph2uw256_mask (__B, + (__v16hi) + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvttph2w. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttph_epi16 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvttph2w128_mask (__A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttph_epi16 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvttph2w128_mask (__C, + ( __v8hi) __A, + __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttph_epi16 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvttph2w128_mask (__B, + (__v8hi) + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttph_epi16 (__m256h __A) +{ + return (__m256i) + __builtin_ia32_vcvttph2w256_mask (__A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttph_epi16 (__m256i __A, __mmask16 __B, __m256h __C) +{ + return (__m256i) + __builtin_ia32_vcvttph2w256_mask (__C, + ( __v16hi) __A, + __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttph_epi16 (__mmask16 __A, __m256h __B) +{ + return (__m256i) + __builtin_ia32_vcvttph2w256_mask (__B, + (__v16hi) + _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvttph2uw. 
*/ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttph_epu16 (__m128h __A) +{ + return (__m128i) + __builtin_ia32_vcvttph2uw128_mask (__A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttph_epu16 (__m128i __A, __mmask8 __B, __m128h __C) +{ + return (__m128i) + __builtin_ia32_vcvttph2uw128_mask (__C, + ( __v8hi) __A, + __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttph_epu16 (__mmask8 __A, __m128h __B) +{ + return (__m128i) + __builtin_ia32_vcvttph2uw128_mask (__B, + (__v8hi) + _mm_setzero_si128 (), + __A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttph_epu16 (__m256h __A) +{ + return (__m256i) + __builtin_ia32_vcvttph2uw256_mask (__A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttph_epu16 (__m256i __A, __mmask16 __B, __m256h __C) +{ + return (__m256i) + __builtin_ia32_vcvttph2uw256_mask (__C, + ( __v16hi) __A, + __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttph_epu16 (__mmask16 __A, __m256h __B) +{ + return (__m256i) + __builtin_ia32_vcvttph2uw256_mask (__B, + (__v16hi) _mm256_setzero_si256 (), + __A); +} + +/* Intrinsics vcvtw2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi16_ph (__m128i __A) +{ + return __builtin_ia32_vcvtw2ph128_mask ((__v8hi) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi16_ph (__m128h __A, __mmask8 __B, __m128i __C) +{ + return __builtin_ia32_vcvtw2ph128_mask ((__v8hi) __C, + __A, + __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi16_ph (__mmask8 __A, __m128i __B) +{ + return __builtin_ia32_vcvtw2ph128_mask ((__v8hi) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi16_ph (__m256i __A) +{ + return __builtin_ia32_vcvtw2ph256_mask ((__v16hi) __A, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi16_ph (__m256h __A, __mmask16 __B, __m256i __C) +{ + return __builtin_ia32_vcvtw2ph256_mask ((__v16hi) __C, + __A, + __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi16_ph (__mmask16 __A, __m256i __B) +{ + return __builtin_ia32_vcvtw2ph256_mask ((__v16hi) __B, + _mm256_setzero_ph (), + __A); +} + +/* Intrinsics vcvtuw2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu16_ph (__m128i __A) +{ + return __builtin_ia32_vcvtuw2ph128_mask ((__v8hi) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu16_ph (__m128h __A, __mmask8 __B, __m128i __C) +{ + return __builtin_ia32_vcvtuw2ph128_mask ((__v8hi) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu16_ph (__mmask8 __A, __m128i __B) +{ + return __builtin_ia32_vcvtuw2ph128_mask ((__v8hi) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu16_ph (__m256i __A) +{ + return __builtin_ia32_vcvtuw2ph256_mask ((__v16hi) __A, + _mm256_setzero_ph (), + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu16_ph (__m256h __A, __mmask16 __B, __m256i __C) +{ + return __builtin_ia32_vcvtuw2ph256_mask ((__v16hi) __C, __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu16_ph (__mmask16 __A, __m256i __B) +{ + return __builtin_ia32_vcvtuw2ph256_mask ((__v16hi) __B, + _mm256_setzero_ph (), + __A); +} + +/* Intrinsics vcvtph2pd. 
*/ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_pd (__m128h __A) +{ + return __builtin_ia32_vcvtph2pd128_mask (__A, + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_pd (__m128d __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2pd128_mask (__C, __A, __B); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_pd (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2pd128_mask (__B, _mm_setzero_pd (), __A); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_pd (__m128h __A) +{ + return __builtin_ia32_vcvtph2pd256_mask (__A, + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_pd (__m256d __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2pd256_mask (__C, __A, __B); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_pd (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2pd256_mask (__B, + _mm256_setzero_pd (), + __A); +} + +/* Intrinsics vcvtph2ps. 
*/ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtxph_ps (__m128h __A) +{ + return __builtin_ia32_vcvtph2psx128_mask (__A, + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtxph_ps (__m128 __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2psx128_mask (__C, __A, __B); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtxph_ps (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2psx128_mask (__B, _mm_setzero_ps (), __A); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtxph_ps (__m128h __A) +{ + return __builtin_ia32_vcvtph2psx256_mask (__A, + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtxph_ps (__m256 __A, __mmask8 __B, __m128h __C) +{ + return __builtin_ia32_vcvtph2psx256_mask (__C, __A, __B); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtxph_ps (__mmask8 __A, __m128h __B) +{ + return __builtin_ia32_vcvtph2psx256_mask (__B, + _mm256_setzero_ps (), + __A); +} + +/* Intrinsics vcvtxps2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtxps_ph (__m128 __A) +{ + return __builtin_ia32_vcvtps2phx128_mask ((__v4sf) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtxps_ph (__m128h __A, __mmask8 __B, __m128 __C) +{ + return __builtin_ia32_vcvtps2phx128_mask ((__v4sf) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtxps_ph (__mmask8 __A, __m128 __B) +{ + return __builtin_ia32_vcvtps2phx128_mask ((__v4sf) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtxps_ph (__m256 __A) +{ + return __builtin_ia32_vcvtps2phx256_mask ((__v8sf) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtxps_ph (__m128h __A, __mmask8 __B, __m256 __C) +{ + return __builtin_ia32_vcvtps2phx256_mask ((__v8sf) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtxps_ph (__mmask8 __A, __m256 __B) +{ + return __builtin_ia32_vcvtps2phx256_mask ((__v8sf) __B, + _mm_setzero_ph (), + __A); +} + +/* Intrinsics vcvtpd2ph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_ph (__m128d __A) +{ + return __builtin_ia32_vcvtpd2ph128_mask ((__v2df) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpd_ph (__m128h __A, __mmask8 __B, __m128d __C) +{ + return __builtin_ia32_vcvtpd2ph128_mask ((__v2df) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpd_ph (__mmask8 __A, __m128d __B) +{ + return __builtin_ia32_vcvtpd2ph128_mask ((__v2df) __B, + _mm_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_ph (__m256d __A) +{ + return __builtin_ia32_vcvtpd2ph256_mask ((__v4df) __A, + _mm_setzero_ph (), + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpd_ph (__m128h __A, __mmask8 __B, __m256d __C) +{ + return __builtin_ia32_vcvtpd2ph256_mask ((__v4df) __C, __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtpd_ph (__mmask8 __A, __m256d __B) +{ + return __builtin_ia32_vcvtpd2ph256_mask ((__v4df) __B, + _mm_setzero_ph (), + __A); +} + +/* Intrinsics vfmaddsub[132,213,231]ph. 
*/ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmaddsub_ph (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h)__builtin_ia32_vfmaddsubph256_mask ((__v16hf)__A, + (__v16hf)__B, + (__v16hf)__C, + (__mmask16)-1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmaddsub_ph (__m256h __A, __mmask16 __U, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmaddsubph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmaddsub_ph (__m256h __A, __m256h __B, __m256h __C, + __mmask16 __U) +{ + return (__m256h) __builtin_ia32_vfmaddsubph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmaddsub_ph (__mmask16 __U, __m256h __A, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmaddsubph256_maskz ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmaddsub_ph (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h)__builtin_ia32_vfmaddsubph128_mask ((__v8hf)__A, + (__v8hf)__B, + (__v8hf)__C, + (__mmask8)-1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmaddsub_ph (__m128h __A, __mmask8 __U, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmaddsubph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmaddsub_ph (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __U) +{ + return (__m128h) 
__builtin_ia32_vfmaddsubph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmaddsub_ph (__mmask8 __U, __m128h __A, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmaddsubph128_maskz ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +/* Intrinsics vfmsubadd[132,213,231]ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmsubadd_ph (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmsubaddph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmsubadd_ph (__m256h __A, __mmask16 __U, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmsubaddph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmsubadd_ph (__m256h __A, __m256h __B, __m256h __C, + __mmask16 __U) +{ + return (__m256h) __builtin_ia32_vfmsubaddph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmsubadd_ph (__mmask16 __U, __m256h __A, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmsubaddph256_maskz ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsubadd_ph (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmsubaddph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsubadd_ph (__m128h __A, __mmask8 __U, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmsubaddph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsubadd_ph (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmsubaddph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsubadd_ph (__mmask8 __U, __m128h __A, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmsubaddph128_maskz ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +/* Intrinsics vfmadd[132,213,231]ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmadd_ph (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmaddph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmadd_ph (__m256h __A, __mmask16 __U, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmaddph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmadd_ph (__m256h __A, __m256h __B, __m256h __C, + __mmask16 __U) +{ + return (__m256h) __builtin_ia32_vfmaddph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmadd_ph (__mmask16 __U, __m256h __A, __m256h __B, + __m256h __C) +{ + return (__m256h) 
__builtin_ia32_vfmaddph256_maskz ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_ph (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmaddph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_ph (__m128h __A, __mmask8 __U, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmaddph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_ph (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmaddph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_ph (__mmask8 __U, __m128h __A, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmaddph128_maskz ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +/* Intrinsics vfnmadd[132,213,231]ph. 
*/ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fnmadd_ph (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfnmaddph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fnmadd_ph (__m256h __A, __mmask16 __U, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfnmaddph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fnmadd_ph (__m256h __A, __m256h __B, __m256h __C, + __mmask16 __U) +{ + return (__m256h) __builtin_ia32_vfnmaddph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fnmadd_ph (__mmask16 __U, __m256h __A, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfnmaddph256_maskz ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_ph (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfnmaddph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_ph (__m128h __A, __mmask8 __U, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfnmaddph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_ph (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __U) +{ + return (__m128h) 
__builtin_ia32_vfnmaddph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_ph (__mmask8 __U, __m128h __A, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfnmaddph128_maskz ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +/* Intrinsics vfmsub[132,213,231]ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmsub_ph (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmsubph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmsub_ph (__m256h __A, __mmask16 __U, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmsubph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmsub_ph (__m256h __A, __m256h __B, __m256h __C, + __mmask16 __U) +{ + return (__m256h) __builtin_ia32_vfmsubph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmsub_ph (__mmask16 __U, __m256h __A, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmsubph256_maskz ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_ph (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmsubph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask_fmsub_ph (__m128h __A, __mmask8 __U, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmsubph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_ph (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfmsubph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_ph (__mmask8 __U, __m128h __A, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmsubph128_maskz ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +/* Intrinsics vfnmsub[132,213,231]ph. */ +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fnmsub_ph (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfnmsubph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) -1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fnmsub_ph (__m256h __A, __mmask16 __U, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfnmsubph256_mask ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fnmsub_ph (__m256h __A, __m256h __B, __m256h __C, + __mmask16 __U) +{ + return (__m256h) __builtin_ia32_vfnmsubph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fnmsub_ph (__mmask16 __U, __m256h __A, __m256h __B, + __m256h __C) +{ + return (__m256h) __builtin_ia32_vfnmsubph256_maskz ((__v16hf) __A, + 
(__v16hf) __B, + (__v16hf) __C, + (__mmask16) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_ph (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfnmsubph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) -1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_ph (__m128h __A, __mmask8 __U, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfnmsubph128_mask ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_ph (__m128h __A, __m128h __B, __m128h __C, + __mmask8 __U) +{ + return (__m128h) __builtin_ia32_vfnmsubph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_ph (__mmask8 __U, __m128h __A, __m128h __B, + __m128h __C) +{ + return (__m128h) __builtin_ia32_vfnmsubph128_maskz ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, + (__mmask8) + __U); +} + +/* Intrinsics vf[,c]maddcph. 
*/ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_pch (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmaddcph128 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_pch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfmaddcph128_mask ((__v8hf) __A, + (__v8hf) __C, + (__v8hf) __D, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_pch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D) +{ + return (__m128h) + __builtin_ia32_vfmaddcph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_pch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D) +{ + return (__m128h) __builtin_ia32_vfmaddcph128_maskz ((__v8hf) __B, + (__v8hf) __C, + (__v8hf) __D, __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmadd_pch (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfmaddcph256 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmadd_pch (__m256h __A, __mmask8 __B, __m256h __C, __m256h __D) +{ + return (__m256h) + __builtin_ia32_vfmaddcph256_mask ((__v16hf) __A, + (__v16hf) __C, + (__v16hf) __D, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmadd_pch (__m256h __A, __m256h __B, __m256h __C, __mmask8 __D) +{ + return (__m256h) + __builtin_ia32_vfmaddcph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, __D); +} + +extern __inline __m256h +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmadd_pch (__mmask8 __A, __m256h __B, __m256h __C, __m256h __D) +{ + return (__m256h)__builtin_ia32_vfmaddcph256_maskz ((__v16hf) __B, + (__v16hf) __C, + (__v16hf) __D, __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fcmadd_pch (__m128h __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfcmaddcph128 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fcmadd_pch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) + __builtin_ia32_vfcmaddcph128_mask ((__v8hf) __A, + (__v8hf) __C, + (__v8hf) __D, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fcmadd_pch (__m128h __A, __m128h __B, __m128h __C, __mmask8 __D) +{ + return (__m128h) + __builtin_ia32_vfcmaddcph128_mask3 ((__v8hf) __A, + (__v8hf) __B, + (__v8hf) __C, __D); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fcmadd_pch (__mmask8 __A, __m128h __B, __m128h __C, __m128h __D) +{ + return (__m128h)__builtin_ia32_vfcmaddcph128_maskz ((__v8hf) __B, + (__v8hf) __C, + (__v8hf) __D, __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fcmadd_pch (__m256h __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfcmaddcph256 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fcmadd_pch (__m256h __A, __mmask8 __B, __m256h __C, __m256h __D) +{ + return (__m256h) + __builtin_ia32_vfcmaddcph256_mask ((__v16hf) __A, + (__v16hf) __C, + (__v16hf) __D, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_mask3_fcmadd_pch (__m256h __A, __m256h __B, __m256h __C, __mmask8 __D) +{ + return (__m256h) + __builtin_ia32_vfcmaddcph256_mask3 ((__v16hf) __A, + (__v16hf) __B, + (__v16hf) __C, __D); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fcmadd_pch (__mmask8 __A, __m256h __B, __m256h __C, __m256h __D) +{ + return (__m256h) __builtin_ia32_vfcmaddcph256_maskz ((__v16hf) __B, + (__v16hf) __C, + (__v16hf) __D, __A); +} + +/* Intrinsics vf[,c]mulcph. */ +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmul_pch (__m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfmulcph128 ((__v8hf) __A, (__v8hf) __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmul_pch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) __builtin_ia32_vfmulcph128_mask ((__v8hf) __C, + (__v8hf) __D, + (__v8hf) __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmul_pch (__mmask8 __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfmulcph128_mask ((__v8hf) __B, + (__v8hf) __C, + _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmul_pch (__m256h __A, __m256h __B) +{ + return (__m256h) __builtin_ia32_vfmulcph256 ((__v16hf) __A, + (__v16hf) __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmul_pch (__m256h __A, __mmask8 __B, __m256h __C, __m256h __D) +{ + return (__m256h) __builtin_ia32_vfmulcph256_mask ((__v16hf) __C, + (__v16hf) __D, + (__v16hf) __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmul_pch (__mmask8 __A, __m256h __B, __m256h __C) +{ + return 
(__m256h) __builtin_ia32_vfmulcph256_mask ((__v16hf) __B, + (__v16hf) __C, + _mm256_setzero_ph (), + __A); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fcmul_pch (__m128h __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_vfcmulcph128 ((__v8hf) __A, + (__v8hf) __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fcmul_pch (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D) +{ + return (__m128h) __builtin_ia32_vfcmulcph128_mask ((__v8hf) __C, + (__v8hf) __D, + (__v8hf) __A, __B); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fcmul_pch (__mmask8 __A, __m128h __B, __m128h __C) +{ + return (__m128h) __builtin_ia32_vfcmulcph128_mask ((__v8hf) __B, + (__v8hf) __C, + _mm_setzero_ph (), + __A); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fcmul_pch (__m256h __A, __m256h __B) +{ + return (__m256h) __builtin_ia32_vfcmulcph256 ((__v16hf) __A, (__v16hf) __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fcmul_pch (__m256h __A, __mmask8 __B, __m256h __C, __m256h __D) +{ + return (__m256h) __builtin_ia32_vfcmulcph256_mask ((__v16hf) __C, + (__v16hf) __D, + (__v16hf) __A, __B); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fcmul_pch (__mmask8 __A, __m256h __B, __m256h __C) +{ + return (__m256h) __builtin_ia32_vfcmulcph256_mask ((__v16hf) __B, + (__v16hf) __C, + _mm256_setzero_ph (), + __A); +} + +#define _MM256_REDUCE_OP(op) \ + __m128h __T1 = (__m128h) _mm256_extractf128_pd ((__m256d) __A, 0); \ + __m128h __T2 = (__m128h) _mm256_extractf128_pd ((__m256d) __A, 1); \ + __m128h __T3 = (__T1 op __T2); \ + __m128h __T4 = (__m128h) __builtin_shuffle (__T3, \ + (__v8hi) { 4, 5, 6, 7, 0, 1, 2, 3 }); \ + 
__m128h __T5 = (__T3) op (__T4); \ + __m128h __T6 = (__m128h) __builtin_shuffle (__T5, \ + (__v8hi) { 2, 3, 0, 1, 4, 5, 6, 7 }); \ + __m128h __T7 = __T5 op __T6; \ + return __T7[0] op __T7[1] + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_add_ph (__m256h __A) +{ + _MM256_REDUCE_OP (+); +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_mul_ph (__m256h __A) +{ + _MM256_REDUCE_OP (*); +} + +#undef _MM256_REDUCE_OP +#define _MM256_REDUCE_OP(op) \ + __m128h __T1 = (__m128h) _mm256_extractf128_pd ((__m256d) __A, 0); \ + __m128h __T2 = (__m128h) _mm256_extractf128_pd ((__m256d) __A, 1); \ + __m128h __T3 = _mm_##op (__T1, __T2); \ + __m128h __T4 = (__m128h) __builtin_shuffle (__T3, \ + (__v8hi) { 2, 3, 0, 1, 6, 7, 4, 5 }); \ + __m128h __T5 = _mm_##op (__T3, __T4); \ + __m128h __T6 = (__m128h) __builtin_shuffle (__T5, (__v8hi) { 4, 5 }); \ + __m128h __T7 = _mm_##op (__T5, __T6); \ + __m128h __T8 = (__m128h) __builtin_shuffle (__T7, (__v8hi) { 1, 0 }); \ + __m128h __T9 = _mm_##op (__T7, __T8); \ + return __T9[0] + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_min_ph (__m256h __A) +{ + _MM256_REDUCE_OP (min_ph); +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_max_ph (__m256h __A) +{ + _MM256_REDUCE_OP (max_ph); +} + +#define _MM_REDUCE_OP(op) \ + __m128h __T1 = (__m128h) __builtin_shuffle (__A, \ + (__v8hi) { 4, 5, 6, 7, 0, 1, 2, 3 }); \ + __m128h __T2 = (__A) op (__T1); \ + __m128h __T3 = (__m128h) __builtin_shuffle (__T2, \ + (__v8hi){ 2, 3, 0, 1, 4, 5, 6, 7 }); \ + __m128h __T4 = __T2 op __T3; \ + return __T4[0] op __T4[1] + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_add_ph (__m128h __A) +{ + _MM_REDUCE_OP (+); +} + +extern __inline _Float16 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_mul_ph (__m128h __A) +{ + _MM_REDUCE_OP (*); +} + +#undef _MM_REDUCE_OP +#define _MM_REDUCE_OP(op) \ + __m128h __T1 = (__m128h) __builtin_shuffle (__A, \ + (__v8hi) { 2, 3, 0, 1, 6, 7, 4, 5 }); \ + __m128h __T2 = _mm_##op (__A, __T1); \ + __m128h __T3 = (__m128h) __builtin_shuffle (__T2, (__v8hi){ 4, 5 }); \ + __m128h __T4 = _mm_##op (__T2, __T3); \ + __m128h __T5 = (__m128h) __builtin_shuffle (__T4, (__v8hi){ 1, 0 }); \ + __m128h __T6 = _mm_##op (__T4, __T5); \ + return __T6[0] + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_min_ph (__m128h __A) +{ + _MM_REDUCE_OP (min_ph); +} + +extern __inline _Float16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_max_ph (__m128h __A) +{ + _MM_REDUCE_OP (max_ph); +} + +#undef _MM256_REDUCE_OP +#undef _MM_REDUCE_OP + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_ph (__mmask16 __U, __m256h __A, __m256h __W) +{ + return (__m256h) __builtin_ia32_movdquhi256_mask ((__v16hi) __W, + (__v16hi) __A, + (__mmask16) __U); + +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_ph (__m256h __A, __m256i __I, __m256h __B) +{ + return (__m256h) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A, + (__v16hi) __I, + (__v16hi) __B, + (__mmask16)-1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutexvar_ph (__m256i __A, __m256h __B) +{ + return (__m256h) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) + (_mm256_setzero_ph ()), + (__mmask16)-1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_ph (__mmask8 __U, __m128h __A, __m128h __W) +{ + return (__m128h) 
__builtin_ia32_movdquhi128_mask ((__v8hi) __W, + (__v8hi) __A, + (__mmask8) __U); + +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_ph (__m128h __A, __m128i __I, __m128h __B) +{ + return (__m128h) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A, + (__v8hi) __I, + (__v8hi) __B, + (__mmask8)-1); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutexvar_ph (__m128i __A, __m128h __B) +{ + return (__m128h) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) + (_mm_setzero_ph ()), + (__mmask8)-1); +} + +extern __inline __m256h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_pch (_Float16 _Complex __A) +{ + union + { + _Float16 _Complex a; + float b; + } u = { .a = __A }; + + return (__m256h) _mm256_set1_ps (u.b); +} + +extern __inline __m128h +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pch (_Float16 _Complex __A) +{ + union + { + _Float16 _Complex a; + float b; + } u = { .a = __A }; + + return (__m128h) _mm_set1_ps (u.b); +} + +// intrinsics below are alias for f*mul_*ch +#define _mm_mul_pch(A, B) _mm_fmul_pch ((A), (B)) +#define _mm_mask_mul_pch(W, U, A, B) _mm_mask_fmul_pch ((W), (U), (A), (B)) +#define _mm_maskz_mul_pch(U, A, B) _mm_maskz_fmul_pch ((U), (A), (B)) +#define _mm256_mul_pch(A, B) _mm256_fmul_pch ((A), (B)) +#define _mm256_mask_mul_pch(W, U, A, B) \ + _mm256_mask_fmul_pch ((W), (U), (A), (B)) +#define _mm256_maskz_mul_pch(U, A, B) _mm256_maskz_fmul_pch ((U), (A), (B)) + +#define _mm_cmul_pch(A, B) _mm_fcmul_pch ((A), (B)) +#define _mm_mask_cmul_pch(W, U, A, B) _mm_mask_fcmul_pch ((W), (U), (A), (B)) +#define _mm_maskz_cmul_pch(U, A, B) _mm_maskz_fcmul_pch ((U), (A), (B)) +#define _mm256_cmul_pch(A, B) _mm256_fcmul_pch ((A), (B)) +#define _mm256_mask_cmul_pch(W, U, A, B) \ + _mm256_mask_fcmul_pch ((W), (U), (A), (B)) +#define 
_mm256_maskz_cmul_pch(U, A, B) _mm256_maskz_fcmul_pch((U), (A), (B)) + +#ifdef __DISABLE_AVX512FP16VL__ +#undef __DISABLE_AVX512FP16VL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512FP16VL__ */ + +#endif /* __AVX512FP16VLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512ifmaintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512ifmaintrin.h new file mode 100644 index 0000000..3676848 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512ifmaintrin.h @@ -0,0 +1,104 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512IFMAINTRIN_H_INCLUDED +#define _AVX512IFMAINTRIN_H_INCLUDED + +#ifndef __AVX512IFMA__ +#pragma GCC push_options +#pragma GCC target("avx512ifma") +#define __DISABLE_AVX512IFMA__ +#endif /* __AVX512IFMA__ */ + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __W, + (__v8di) __X, + (__v8di) __Y, + (__mmask8) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __W, + (__v8di) __X, + (__v8di) __Y, + (__mmask8) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52luq512_maskz ((__v8di) __X, + (__v8di) __Y, + (__v8di) __Z, + (__mmask8) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i) __builtin_ia32_vpmadd52huq512_maskz ((__v8di) __X, + (__v8di) __Y, + (__v8di) 
__Z, + (__mmask8) __M); +} + +#ifdef __DISABLE_AVX512IFMA__ +#undef __DISABLE_AVX512IFMA__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512IFMA__ */ + +#endif /* _AVX512IFMAINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512ifmavlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512ifmavlintrin.h new file mode 100644 index 0000000..a7a50d8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512ifmavlintrin.h @@ -0,0 +1,164 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512IFMAVLINTRIN_H_INCLUDED +#define _AVX512IFMAVLINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512IFMA__) +#pragma GCC push_options +#pragma GCC target("avx512ifma,avx512vl") +#define __DISABLE_AVX512IFMAVL__ +#endif /* __AVX512IFMAVL__ */ + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __W, + (__v2di) __X, + (__v2di) __Y, + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __W, + (__v2di) __X, + (__v2di) __Y, + 
(__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __W, + (__v4di) __X, + (__v4di) __Y, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __W, + (__v4di) __X, + (__v4di) __Y, + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52luq128_maskz ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i) __builtin_ia32_vpmadd52huq128_maskz ((__v2di) __X, + (__v2di) __Y, + (__v2di) __Z, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52luq256_maskz ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i) __builtin_ia32_vpmadd52huq256_maskz ((__v4di) __X, + (__v4di) __Y, + (__v4di) __Z, + (__mmask8) __M); +} + +#ifdef __DISABLE_AVX512IFMAVL__ +#undef __DISABLE_AVX512IFMAVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512IFMAVL__ */ + +#endif /* 
_AVX512IFMAVLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512pfintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512pfintrin.h new file mode 100644 index 0000000..3d44c3a --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512pfintrin.h @@ -0,0 +1,269 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512PFINTRIN_H_INCLUDED +#define _AVX512PFINTRIN_H_INCLUDED + +#ifndef __AVX512PF__ +#pragma GCC push_options +#pragma GCC target("avx512pf") +#define __DISABLE_AVX512PF__ +#endif /* __AVX512PF__ */ + +/* Internal data types for implementing the intrinsics. */ +typedef long long __v8di __attribute__ ((__vector_size__ (64))); +typedef int __v16si __attribute__ ((__vector_size__ (64))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. 
*/ +typedef long long __m512i __attribute__ ((__vector_size__ (64), __may_alias__)); + +typedef unsigned char __mmask8; +typedef unsigned short __mmask16; + +#ifdef __OPTIMIZE__ +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i32gather_pd (__m256i __index, void const *__addr, + int __scale, int __hint) +{ + __builtin_ia32_gatherpfdpd ((__mmask8) 0xFF, (__v8si) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i32gather_ps (__m512i __index, void const *__addr, + int __scale, int __hint) +{ + __builtin_ia32_gatherpfdps ((__mmask16) 0xFFFF, (__v16si) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i32gather_pd (__m256i __index, __mmask8 __mask, + void const *__addr, int __scale, int __hint) +{ + __builtin_ia32_gatherpfdpd (__mask, (__v8si) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i32gather_ps (__m512i __index, __mmask16 __mask, + void const *__addr, int __scale, int __hint) +{ + __builtin_ia32_gatherpfdps (__mask, (__v16si) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i64gather_pd (__m512i __index, void const *__addr, + int __scale, int __hint) +{ + __builtin_ia32_gatherpfqpd ((__mmask8) 0xFF, (__v8di) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i64gather_ps (__m512i __index, void const *__addr, + int __scale, int __hint) +{ + __builtin_ia32_gatherpfqps ((__mmask8) 0xFF, (__v8di) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_mask_prefetch_i64gather_pd (__m512i __index, __mmask8 __mask, + void const *__addr, int __scale, int __hint) +{ + __builtin_ia32_gatherpfqpd (__mask, (__v8di) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i64gather_ps (__m512i __index, __mmask8 __mask, + void const *__addr, int __scale, int __hint) +{ + __builtin_ia32_gatherpfqps (__mask, (__v8di) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i32scatter_pd (void *__addr, __m256i __index, int __scale, + int __hint) +{ + __builtin_ia32_scatterpfdpd ((__mmask8) 0xFF, (__v8si) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i32scatter_ps (void *__addr, __m512i __index, int __scale, + int __hint) +{ + __builtin_ia32_scatterpfdps ((__mmask16) 0xFFFF, (__v16si) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i32scatter_pd (void *__addr, __mmask8 __mask, + __m256i __index, int __scale, int __hint) +{ + __builtin_ia32_scatterpfdpd (__mask, (__v8si) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i32scatter_ps (void *__addr, __mmask16 __mask, + __m512i __index, int __scale, int __hint) +{ + __builtin_ia32_scatterpfdps (__mask, (__v16si) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i64scatter_pd (void *__addr, __m512i __index, int __scale, + int __hint) +{ + __builtin_ia32_scatterpfqpd ((__mmask8) 0xFF, (__v8di) __index,__addr, + __scale, __hint); +} + +extern 
__inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_prefetch_i64scatter_ps (void *__addr, __m512i __index, int __scale, + int __hint) +{ + __builtin_ia32_scatterpfqps ((__mmask8) 0xFF, (__v8di) __index, __addr, + __scale, __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i64scatter_pd (void *__addr, __mmask8 __mask, + __m512i __index, int __scale, int __hint) +{ + __builtin_ia32_scatterpfqpd (__mask, (__v8di) __index, __addr, __scale, + __hint); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_prefetch_i64scatter_ps (void *__addr, __mmask8 __mask, + __m512i __index, int __scale, int __hint) +{ + __builtin_ia32_scatterpfqps (__mask, (__v8di) __index, __addr, __scale, + __hint); +} + +#else +#define _mm512_prefetch_i32gather_pd(INDEX, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfdpd ((__mmask8)0xFF, (__v8si)(__m256i) (INDEX), \ + (void const *) (ADDR), (int) (SCALE), \ + (int) (HINT)) + +#define _mm512_prefetch_i32gather_ps(INDEX, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfdps ((__mmask16)0xFFFF, (__v16si)(__m512i) (INDEX), \ + (void const *) (ADDR), (int) (SCALE), \ + (int) (HINT)) + +#define _mm512_mask_prefetch_i32gather_pd(INDEX, MASK, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfdpd ((__mmask8) (MASK), (__v8si)(__m256i) (INDEX), \ + (void const *) (ADDR), (int) (SCALE), \ + (int) (HINT)) + +#define _mm512_mask_prefetch_i32gather_ps(INDEX, MASK, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfdps ((__mmask16) (MASK), (__v16si)(__m512i) (INDEX),\ + (void const *) (ADDR), (int) (SCALE), \ + (int) (HINT)) + +#define _mm512_prefetch_i64gather_pd(INDEX, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfqpd ((__mmask8)0xFF, (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_prefetch_i64gather_ps(INDEX, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfqps 
((__mmask8)0xFF, (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_mask_prefetch_i64gather_pd(INDEX, MASK, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfqpd ((__mmask8) (MASK), (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_mask_prefetch_i64gather_ps(INDEX, MASK, ADDR, SCALE, HINT) \ + __builtin_ia32_gatherpfqps ((__mmask8) (MASK), (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_prefetch_i32scatter_pd(ADDR, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfdpd ((__mmask8)0xFF, (__v8si)(__m256i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_prefetch_i32scatter_ps(ADDR, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfdps ((__mmask16)0xFFFF, (__v16si)(__m512i) (INDEX),\ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_mask_prefetch_i32scatter_pd(ADDR, MASK, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfdpd ((__mmask8) (MASK), (__v8si)(__m256i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_mask_prefetch_i32scatter_ps(ADDR, MASK, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfdps ((__mmask16) (MASK), \ + (__v16si)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_prefetch_i64scatter_pd(ADDR, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfqpd ((__mmask8)0xFF, (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_prefetch_i64scatter_ps(ADDR, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfqps ((__mmask8)0xFF, (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_mask_prefetch_i64scatter_pd(ADDR, MASK, INDEX, SCALE, HINT) \ + __builtin_ia32_scatterpfqpd ((__mmask8) (MASK), (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) + +#define _mm512_mask_prefetch_i64scatter_ps(ADDR, MASK, INDEX, SCALE, HINT) \ + 
__builtin_ia32_scatterpfqps ((__mmask8) (MASK), (__v8di)(__m512i) (INDEX), \ + (void *) (ADDR), (int) (SCALE), (int) (HINT)) +#endif + +#ifdef __DISABLE_AVX512PF__ +#undef __DISABLE_AVX512PF__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512PF__ */ + +#endif /* _AVX512PFINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmi2intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmi2intrin.h new file mode 100644 index 0000000..6050c38 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmi2intrin.h @@ -0,0 +1,557 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef __AVX512VBMI2INTRIN_H_INCLUDED +#define __AVX512VBMI2INTRIN_H_INCLUDED + +#if !defined(__AVX512VBMI2__) +#pragma GCC push_options +#pragma GCC target("avx512vbmi2") +#define __DISABLE_AVX512VBMI2__ +#endif /* __AVX512VBMI2__ */ + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shrdi_epi16 (__m512i __A, __m512i __B, int __C) +{ + return (__m512i) __builtin_ia32_vpshrd_v32hi ((__v32hi)__A, (__v32hi) __B, + __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shrdi_epi32 (__m512i __A, __m512i __B, int __C) +{ + return (__m512i) __builtin_ia32_vpshrd_v16si ((__v16si)__A, (__v16si) __B, + __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shrdi_epi32 (__m512i __A, __mmask16 __B, __m512i __C, __m512i __D, + int __E) +{ + return (__m512i)__builtin_ia32_vpshrd_v16si_mask ((__v16si)__C, + (__v16si) __D, __E, (__v16si) __A, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shrdi_epi32 (__mmask16 __A, __m512i __B, __m512i __C, int __D) +{ + return (__m512i)__builtin_ia32_vpshrd_v16si_mask ((__v16si)__B, + (__v16si) __C, __D, (__v16si) _mm512_setzero_si512 (), (__mmask16)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shrdi_epi64 (__m512i __A, __m512i __B, int __C) +{ + return (__m512i) __builtin_ia32_vpshrd_v8di ((__v8di)__A, (__v8di) __B, __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shrdi_epi64 (__m512i __A, __mmask8 __B, __m512i __C, __m512i __D, + int __E) +{ + return (__m512i)__builtin_ia32_vpshrd_v8di_mask ((__v8di)__C, (__v8di) __D, + __E, (__v8di) __A, (__mmask8)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm512_maskz_shrdi_epi64 (__mmask8 __A, __m512i __B, __m512i __C, int __D) +{ + return (__m512i)__builtin_ia32_vpshrd_v8di_mask ((__v8di)__B, (__v8di) __C, + __D, (__v8di) _mm512_setzero_si512 (), (__mmask8)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shldi_epi16 (__m512i __A, __m512i __B, int __C) +{ + return (__m512i) __builtin_ia32_vpshld_v32hi ((__v32hi)__A, (__v32hi) __B, + __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shldi_epi32 (__m512i __A, __m512i __B, int __C) +{ + return (__m512i) __builtin_ia32_vpshld_v16si ((__v16si)__A, (__v16si) __B, + __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shldi_epi32 (__m512i __A, __mmask16 __B, __m512i __C, __m512i __D, + int __E) +{ + return (__m512i)__builtin_ia32_vpshld_v16si_mask ((__v16si)__C, + (__v16si) __D, __E, (__v16si) __A, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shldi_epi32 (__mmask16 __A, __m512i __B, __m512i __C, int __D) +{ + return (__m512i)__builtin_ia32_vpshld_v16si_mask ((__v16si)__B, + (__v16si) __C, __D, (__v16si) _mm512_setzero_si512 (), (__mmask16)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shldi_epi64 (__m512i __A, __m512i __B, int __C) +{ + return (__m512i) __builtin_ia32_vpshld_v8di ((__v8di)__A, (__v8di) __B, __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shldi_epi64 (__m512i __A, __mmask8 __B, __m512i __C, __m512i __D, + int __E) +{ + return (__m512i)__builtin_ia32_vpshld_v8di_mask ((__v8di)__C, (__v8di) __D, + __E, (__v8di) __A, (__mmask8)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_maskz_shldi_epi64 (__mmask8 __A, __m512i __B, __m512i __C, int __D) +{ + return (__m512i)__builtin_ia32_vpshld_v8di_mask ((__v8di)__B, (__v8di) __C, + __D, (__v8di) _mm512_setzero_si512 (), (__mmask8)__A); +} +#else +#define _mm512_shrdi_epi16(A, B, C) \ + ((__m512i) __builtin_ia32_vpshrd_v32hi ((__v32hi)(__m512i)(A), \ + (__v32hi)(__m512i)(B),(int)(C))) +#define _mm512_shrdi_epi32(A, B, C) \ + ((__m512i) __builtin_ia32_vpshrd_v16si ((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B),(int)(C))) +#define _mm512_mask_shrdi_epi32(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vpshrd_v16si_mask ((__v16si)(__m512i)(C), \ + (__v16si)(__m512i)(D), \ + (int)(E), \ + (__v16si)(__m512i)(A), \ + (__mmask16)(B))) +#define _mm512_maskz_shrdi_epi32(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vpshrd_v16si_mask ((__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C),(int)(D), \ + (__v16si)(__m512i)_mm512_setzero_si512 (), \ + (__mmask16)(A))) +#define _mm512_shrdi_epi64(A, B, C) \ + ((__m512i) __builtin_ia32_vpshrd_v8di ((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B),(int)(C))) +#define _mm512_mask_shrdi_epi64(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vpshrd_v8di_mask ((__v8di)(__m512i)(C), \ + (__v8di)(__m512i)(D), (int)(E), \ + (__v8di)(__m512i)(A), \ + (__mmask8)(B))) +#define _mm512_maskz_shrdi_epi64(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vpshrd_v8di_mask ((__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C),(int)(D), \ + (__v8di)(__m512i)_mm512_setzero_si512 (), \ + (__mmask8)(A))) +#define _mm512_shldi_epi16(A, B, C) \ + ((__m512i) __builtin_ia32_vpshld_v32hi ((__v32hi)(__m512i)(A), \ + (__v32hi)(__m512i)(B),(int)(C))) +#define _mm512_shldi_epi32(A, B, C) \ + ((__m512i) __builtin_ia32_vpshld_v16si ((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B),(int)(C))) +#define _mm512_mask_shldi_epi32(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vpshld_v16si_mask ((__v16si)(__m512i)(C), \ + (__v16si)(__m512i)(D), \ + (int)(E), \ + (__v16si)(__m512i)(A), \ + 
(__mmask16)(B))) +#define _mm512_maskz_shldi_epi32(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vpshld_v16si_mask ((__v16si)(__m512i)(B), \ + (__v16si)(__m512i)(C),(int)(D), \ + (__v16si)(__m512i)_mm512_setzero_si512 (), \ + (__mmask16)(A))) +#define _mm512_shldi_epi64(A, B, C) \ + ((__m512i) __builtin_ia32_vpshld_v8di ((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(C))) +#define _mm512_mask_shldi_epi64(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vpshld_v8di_mask ((__v8di)(__m512i)(C), \ + (__v8di)(__m512i)(D), (int)(E), \ + (__v8di)(__m512i)(A), \ + (__mmask8)(B))) +#define _mm512_maskz_shldi_epi64(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vpshld_v8di_mask ((__v8di)(__m512i)(B), \ + (__v8di)(__m512i)(C),(int)(D), \ + (__v8di)(__m512i)_mm512_setzero_si512 (), \ + (__mmask8)(A))) +#endif + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shrdv_epi16 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpshrdv_v32hi ((__v32hi)__A, (__v32hi) __B, + (__v32hi) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shrdv_epi32 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpshrdv_v16si ((__v16si)__A, (__v16si) __B, + (__v16si) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shrdv_epi32 (__m512i __A, __mmask16 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshrdv_v16si_mask ((__v16si)__A, + (__v16si) __C, (__v16si) __D, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shrdv_epi32 (__mmask16 __A, __m512i __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshrdv_v16si_maskz ((__v16si)__B, + (__v16si) __C, (__v16si) __D, (__mmask16)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm512_shrdv_epi64 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpshrdv_v8di ((__v8di)__A, (__v8di) __B, + (__v8di) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shrdv_epi64 (__m512i __A, __mmask8 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshrdv_v8di_mask ((__v8di)__A, (__v8di) __C, + (__v8di) __D, (__mmask8)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shrdv_epi64 (__mmask8 __A, __m512i __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshrdv_v8di_maskz ((__v8di)__B, (__v8di) __C, + (__v8di) __D, (__mmask8)__A); +} +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shldv_epi16 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpshldv_v32hi ((__v32hi)__A, (__v32hi) __B, + (__v32hi) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shldv_epi32 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpshldv_v16si ((__v16si)__A, (__v16si) __B, + (__v16si) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shldv_epi32 (__m512i __A, __mmask16 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshldv_v16si_mask ((__v16si)__A, + (__v16si) __C, (__v16si) __D, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shldv_epi32 (__mmask16 __A, __m512i __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshldv_v16si_maskz ((__v16si)__B, + (__v16si) __C, (__v16si) __D, (__mmask16)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_shldv_epi64 (__m512i __A, 
__m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpshldv_v8di ((__v8di)__A, (__v8di) __B, + (__v8di) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shldv_epi64 (__m512i __A, __mmask8 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshldv_v8di_mask ((__v8di)__A, (__v8di) __C, + (__v8di) __D, (__mmask8)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shldv_epi64 (__mmask8 __A, __m512i __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshldv_v8di_maskz ((__v8di)__B, (__v8di) __C, + (__v8di) __D, (__mmask8)__A); +} + +#ifdef __DISABLE_AVX512VBMI2__ +#undef __DISABLE_AVX512VBMI2__ + +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VBMI2__ */ + +#if !defined(__AVX512VBMI2__) || !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("avx512vbmi2,avx512bw") +#define __DISABLE_AVX512VBMI2BW__ +#endif /* __AVX512VBMI2BW__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compress_epi8 (__m512i __A, __mmask64 __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi)__C, + (__v64qi)__A, (__mmask64)__B); +} + + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_compress_epi8 (__mmask64 __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi)__B, + (__v64qi)_mm512_setzero_si512 (), (__mmask64)__A); +} + + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compressstoreu_epi8 (void * __A, __mmask64 __B, __m512i __C) +{ + __builtin_ia32_compressstoreuqi512_mask ((__v64qi *) __A, (__v64qi) __C, + (__mmask64) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compress_epi16 (__m512i __A, 
__mmask32 __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi)__C, + (__v32hi)__A, (__mmask32)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_compress_epi16 (__mmask32 __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi)__B, + (__v32hi)_mm512_setzero_si512 (), (__mmask32)__A); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_compressstoreu_epi16 (void * __A, __mmask32 __B, __m512i __C) +{ + __builtin_ia32_compressstoreuhi512_mask ((__v32hi *) __A, (__v32hi) __C, + (__mmask32) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expand_epi8 (__m512i __A, __mmask64 __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __C, + (__v64qi) __A, + (__mmask64) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expand_epi8 (__mmask64 __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_expandqi512_maskz ((__v64qi) __B, + (__v64qi) _mm512_setzero_si512 (), (__mmask64) __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expandloadu_epi8 (__m512i __A, __mmask64 __B, const void * __C) +{ + return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *) __C, + (__v64qi) __A, (__mmask64) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expandloadu_epi8 (__mmask64 __A, const void * __B) +{ + return (__m512i) __builtin_ia32_expandloadqi512_maskz ((const __v64qi *) __B, + (__v64qi) _mm512_setzero_si512 (), (__mmask64) __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expand_epi16 (__m512i __A, __mmask32 __B, __m512i __C) +{ + 
return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __C, + (__v32hi) __A, + (__mmask32) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expand_epi16 (__mmask32 __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_expandhi512_maskz ((__v32hi) __B, + (__v32hi) _mm512_setzero_si512 (), (__mmask32) __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_expandloadu_epi16 (__m512i __A, __mmask32 __B, const void * __C) +{ + return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *) __C, + (__v32hi) __A, (__mmask32) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_expandloadu_epi16 (__mmask32 __A, const void * __B) +{ + return (__m512i) __builtin_ia32_expandloadhi512_maskz ((const __v32hi *) __B, + (__v32hi) _mm512_setzero_si512 (), (__mmask32) __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shrdi_epi16 (__m512i __A, __mmask32 __B, __m512i __C, __m512i __D, + int __E) +{ + return (__m512i)__builtin_ia32_vpshrd_v32hi_mask ((__v32hi)__C, + (__v32hi) __D, __E, (__v32hi) __A, (__mmask32)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shrdi_epi16 (__mmask32 __A, __m512i __B, __m512i __C, int __D) +{ + return (__m512i)__builtin_ia32_vpshrd_v32hi_mask ((__v32hi)__B, + (__v32hi) __C, __D, (__v32hi) _mm512_setzero_si512 (), (__mmask32)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shldi_epi16 (__m512i __A, __mmask32 __B, __m512i __C, __m512i __D, + int __E) +{ + return (__m512i)__builtin_ia32_vpshld_v32hi_mask ((__v32hi)__C, + (__v32hi) __D, __E, (__v32hi) __A, (__mmask32)__B); +} + +extern __inline __m512i 
+__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shldi_epi16 (__mmask32 __A, __m512i __B, __m512i __C, int __D) +{ + return (__m512i)__builtin_ia32_vpshld_v32hi_mask ((__v32hi)__B, + (__v32hi) __C, __D, (__v32hi) _mm512_setzero_si512 (), (__mmask32)__A); +} + +#else +#define _mm512_mask_shrdi_epi16(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vpshrd_v32hi_mask ((__v32hi)(__m512i)(C), \ + (__v32hi)(__m512i)(D), \ + (int)(E), \ + (__v32hi)(__m512i)(A), \ + (__mmask32)(B))) +#define _mm512_maskz_shrdi_epi16(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vpshrd_v32hi_mask ((__v32hi)(__m512i)(B), \ + (__v32hi)(__m512i)(C),(int)(D), \ + (__v32hi)(__m512i)_mm512_setzero_si512 (), \ + (__mmask32)(A))) +#define _mm512_mask_shldi_epi16(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vpshld_v32hi_mask ((__v32hi)(__m512i)(C), \ + (__v32hi)(__m512i)(D), \ + (int)(E), \ + (__v32hi)(__m512i)(A), \ + (__mmask32)(B))) +#define _mm512_maskz_shldi_epi16(A, B, C, D) \ + ((__m512i) \ + __builtin_ia32_vpshld_v32hi_mask ((__v32hi)(__m512i)(B), \ + (__v32hi)(__m512i)(C),(int)(D), \ + (__v32hi)(__m512i)_mm512_setzero_si512 (), \ + (__mmask32)(A))) +#endif + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shrdv_epi16 (__m512i __A, __mmask32 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshrdv_v32hi_mask ((__v32hi)__A, + (__v32hi) __C, (__v32hi) __D, (__mmask32)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shrdv_epi16 (__mmask32 __A, __m512i __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshrdv_v32hi_maskz ((__v32hi)__B, + (__v32hi) __C, (__v32hi) __D, (__mmask32)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_shldv_epi16 (__m512i __A, __mmask32 __B, __m512i __C, __m512i __D) +{ + return 
(__m512i)__builtin_ia32_vpshldv_v32hi_mask ((__v32hi)__A, + (__v32hi) __C, (__v32hi) __D, (__mmask32)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_shldv_epi16 (__mmask32 __A, __m512i __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpshldv_v32hi_maskz ((__v32hi)__B, + (__v32hi) __C, (__v32hi) __D, (__mmask32)__A); +} + +#ifdef __DISABLE_AVX512VBMI2BW__ +#undef __DISABLE_AVX512VBMI2BW__ + +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VBMI2BW__ */ + +#endif /* __AVX512VBMI2INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmi2vlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmi2vlintrin.h new file mode 100644 index 0000000..47c4413 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmi2vlintrin.h @@ -0,0 +1,1037 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512VBMI2VLINTRIN_H_INCLUDED +#define _AVX512VBMI2VLINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__) +#pragma GCC push_options +#pragma GCC target("avx512vbmi2,avx512vl") +#define __DISABLE_AVX512VBMI2VL__ +#endif /* __AVX512VBMIVL__ */ + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compress_epi8 (__m128i __A, __mmask16 __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi)__C, + (__v16qi)__A, (__mmask16)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_compress_epi8 (__mmask16 __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __B, + (__v16qi) _mm_setzero_si128 (), (__mmask16) __A); +} + + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compressstoreu_epi16 (void * __A, __mmask16 __B, __m256i __C) +{ + __builtin_ia32_compressstoreuhi256_mask ((__v16hi *) __A, (__v16hi) __C, + (__mmask16) __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compress_epi16 (__m128i __A, __mmask8 __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi)__C, (__v8hi)__A, + (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_compress_epi16 (__mmask8 __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __B, + (__v8hi) _mm_setzero_si128 (), (__mmask8) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compress_epi16 (__m256i __A, __mmask16 __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi)__C, + (__v16hi)__A, (__mmask16)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_maskz_compress_epi16 (__mmask16 __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __B, + (__v16hi) _mm256_setzero_si256 (), (__mmask16) __A); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compressstoreu_epi8 (void * __A, __mmask16 __B, __m128i __C) +{ + __builtin_ia32_compressstoreuqi128_mask ((__v16qi *) __A, (__v16qi) __C, + (__mmask16) __B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compressstoreu_epi16 (void * __A, __mmask8 __B, __m128i __C) +{ + __builtin_ia32_compressstoreuhi128_mask ((__v8hi *) __A, (__v8hi) __C, + (__mmask8) __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expand_epi8 (__m128i __A, __mmask16 __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __C, + (__v16qi) __A, + (__mmask16) __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expand_epi8 (__mmask16 __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_expandqi128_maskz ((__v16qi) __B, + (__v16qi) _mm_setzero_si128 (), (__mmask16) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expandloadu_epi8 (__m128i __A, __mmask16 __B, const void * __C) +{ + return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *) __C, + (__v16qi) __A, (__mmask16) __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expandloadu_epi8 (__mmask16 __A, const void * __B) +{ + return (__m128i) __builtin_ia32_expandloadqi128_maskz ((const __v16qi *) __B, + (__v16qi) _mm_setzero_si128 (), (__mmask16) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expand_epi16 (__m128i __A, __mmask8 
__B, __m128i __C) +{ + return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __C, + (__v8hi) __A, + (__mmask8) __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expand_epi16 (__mmask8 __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_expandhi128_maskz ((__v8hi) __B, + (__v8hi) _mm_setzero_si128 (), (__mmask8) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expandloadu_epi16 (__m128i __A, __mmask8 __B, const void * __C) +{ + return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *) __C, + (__v8hi) __A, (__mmask8) __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expandloadu_epi16 (__mmask8 __A, const void * __B) +{ + return (__m128i) __builtin_ia32_expandloadhi128_maskz ((const __v8hi *) __B, + (__v8hi) _mm_setzero_si128 (), (__mmask8) __A); +} +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expand_epi16 (__m256i __A, __mmask16 __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __C, + (__v16hi) __A, + (__mmask16) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expand_epi16 (__mmask16 __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_expandhi256_maskz ((__v16hi) __B, + (__v16hi) _mm256_setzero_si256 (), (__mmask16) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expandloadu_epi16 (__m256i __A, __mmask16 __B, const void * __C) +{ + return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *) __C, + (__v16hi) __A, (__mmask16) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expandloadu_epi16 (__mmask16 __A, const void * __B) +{ + return (__m256i) 
__builtin_ia32_expandloadhi256_maskz ((const __v16hi *) __B, + (__v16hi) _mm256_setzero_si256 (), (__mmask16) __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shrdi_epi16 (__m256i __A, __m256i __B, int __C) +{ + return (__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)__A, (__v16hi) __B, + __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shrdi_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D, + int __E) +{ + return (__m256i)__builtin_ia32_vpshrd_v16hi_mask ((__v16hi)__C, + (__v16hi) __D, __E, (__v16hi) __A, (__mmask16)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shrdi_epi16 (__mmask16 __A, __m256i __B, __m256i __C, int __D) +{ + return (__m256i)__builtin_ia32_vpshrd_v16hi_mask ((__v16hi)__B, + (__v16hi) __C, __D, (__v16hi) _mm256_setzero_si256 (), (__mmask16)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shrdi_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, + int __E) +{ + return (__m256i)__builtin_ia32_vpshrd_v8si_mask ((__v8si)__C, (__v8si) __D, + __E, (__v8si) __A, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shrdi_epi32 (__mmask8 __A, __m256i __B, __m256i __C, int __D) +{ + return (__m256i)__builtin_ia32_vpshrd_v8si_mask ((__v8si)__B, (__v8si) __C, + __D, (__v8si) _mm256_setzero_si256 (), (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shrdi_epi32 (__m256i __A, __m256i __B, int __C) +{ + return (__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)__A, (__v8si) __B, __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shrdi_epi64 (__m256i 
__A, __mmask8 __B, __m256i __C, __m256i __D, + int __E) +{ + return (__m256i)__builtin_ia32_vpshrd_v4di_mask ((__v4di)__C, (__v4di) __D, + __E, (__v4di) __A, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shrdi_epi64 (__mmask8 __A, __m256i __B, __m256i __C, int __D) +{ + return (__m256i)__builtin_ia32_vpshrd_v4di_mask ((__v4di)__B, (__v4di) __C, + __D, (__v4di) _mm256_setzero_si256 (), (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shrdi_epi64 (__m256i __A, __m256i __B, int __C) +{ + return (__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)__A, (__v4di) __B, __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shrdi_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, + int __E) +{ + return (__m128i)__builtin_ia32_vpshrd_v8hi_mask ((__v8hi)__C, (__v8hi) __D, + __E, (__v8hi) __A, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shrdi_epi16 (__mmask8 __A, __m128i __B, __m128i __C, int __D) +{ + return (__m128i)__builtin_ia32_vpshrd_v8hi_mask ((__v8hi)__B, (__v8hi) __C, + __D, (__v8hi) _mm_setzero_si128 (), (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shrdi_epi16 (__m128i __A, __m128i __B, int __C) +{ + return (__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)__A, (__v8hi) __B, __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shrdi_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, + int __E) +{ + return (__m128i)__builtin_ia32_vpshrd_v4si_mask ((__v4si)__C, (__v4si) __D, + __E, (__v4si) __A, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shrdi_epi32 (__mmask8 
__A, __m128i __B, __m128i __C, int __D) +{ + return (__m128i)__builtin_ia32_vpshrd_v4si_mask ((__v4si)__B, (__v4si) __C, + __D, (__v4si) _mm_setzero_si128 (), (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shrdi_epi32 (__m128i __A, __m128i __B, int __C) +{ + return (__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)__A, (__v4si) __B, __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shrdi_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, + int __E) +{ + return (__m128i)__builtin_ia32_vpshrd_v2di_mask ((__v2di)__C, (__v2di) __D, + __E, (__v2di) __A, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shrdi_epi64 (__mmask8 __A, __m128i __B, __m128i __C, int __D) +{ + return (__m128i)__builtin_ia32_vpshrd_v2di_mask ((__v2di)__B, (__v2di) __C, + __D, (__v2di) _mm_setzero_si128 (), (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shrdi_epi64 (__m128i __A, __m128i __B, int __C) +{ + return (__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)__A, (__v2di) __B, __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shldi_epi16 (__m256i __A, __m256i __B, int __C) +{ + return (__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)__A, (__v16hi) __B, + __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shldi_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D, + int __E) +{ + return (__m256i)__builtin_ia32_vpshld_v16hi_mask ((__v16hi)__C, + (__v16hi) __D, __E, (__v16hi) __A, (__mmask16)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shldi_epi16 (__mmask16 __A, __m256i __B, __m256i __C, int __D) +{ + return 
(__m256i)__builtin_ia32_vpshld_v16hi_mask ((__v16hi)__B, + (__v16hi) __C, __D, (__v16hi) _mm256_setzero_si256 (), (__mmask16)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shldi_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, + int __E) +{ + return (__m256i)__builtin_ia32_vpshld_v8si_mask ((__v8si)__C, (__v8si) __D, + __E, (__v8si) __A, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shldi_epi32 (__mmask8 __A, __m256i __B, __m256i __C, int __D) +{ + return (__m256i)__builtin_ia32_vpshld_v8si_mask ((__v8si)__B, (__v8si) __C, + __D, (__v8si) _mm256_setzero_si256 (), (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shldi_epi32 (__m256i __A, __m256i __B, int __C) +{ + return (__m256i) __builtin_ia32_vpshld_v8si ((__v8si)__A, (__v8si) __B, __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shldi_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D, + int __E) +{ + return (__m256i)__builtin_ia32_vpshld_v4di_mask ((__v4di)__C, (__v4di) __D, + __E, (__v4di) __A, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shldi_epi64 (__mmask8 __A, __m256i __B, __m256i __C, int __D) +{ + return (__m256i)__builtin_ia32_vpshld_v4di_mask ((__v4di)__B, (__v4di) __C, + __D, (__v4di) _mm256_setzero_si256 (), (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shldi_epi64 (__m256i __A, __m256i __B, int __C) +{ + return (__m256i) __builtin_ia32_vpshld_v4di ((__v4di)__A, (__v4di) __B, __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shldi_epi16 (__m128i __A, __mmask8 __B, __m128i 
__C, __m128i __D, + int __E) +{ + return (__m128i)__builtin_ia32_vpshld_v8hi_mask ((__v8hi)__C, (__v8hi) __D, + __E, (__v8hi) __A, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shldi_epi16 (__mmask8 __A, __m128i __B, __m128i __C, int __D) +{ + return (__m128i)__builtin_ia32_vpshld_v8hi_mask ((__v8hi)__B, (__v8hi) __C, + __D, (__v8hi) _mm_setzero_si128 (), (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shldi_epi16 (__m128i __A, __m128i __B, int __C) +{ + return (__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)__A, (__v8hi) __B, __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shldi_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, + int __E) +{ + return (__m128i)__builtin_ia32_vpshld_v4si_mask ((__v4si)__C, (__v4si) __D, + __E, (__v4si) __A, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shldi_epi32 (__mmask8 __A, __m128i __B, __m128i __C, int __D) +{ + return (__m128i)__builtin_ia32_vpshld_v4si_mask ((__v4si)__B, (__v4si) __C, + __D, (__v4si) _mm_setzero_si128 (), (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shldi_epi32 (__m128i __A, __m128i __B, int __C) +{ + return (__m128i) __builtin_ia32_vpshld_v4si ((__v4si)__A, (__v4si) __B, __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shldi_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D, + int __E) +{ + return (__m128i)__builtin_ia32_vpshld_v2di_mask ((__v2di)__C, (__v2di) __D, + __E, (__v2di) __A, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shldi_epi64 (__mmask8 __A, __m128i __B, __m128i __C, int 
__D) +{ + return (__m128i)__builtin_ia32_vpshld_v2di_mask ((__v2di)__B, (__v2di) __C, + __D, (__v2di) _mm_setzero_si128 (), (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shldi_epi64 (__m128i __A, __m128i __B, int __C) +{ + return (__m128i) __builtin_ia32_vpshld_v2di ((__v2di)__A, (__v2di) __B, __C); +} +#else +#define _mm256_shrdi_epi16(A, B, C) \ + ((__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)(__m256i)(A), \ + (__v16hi)(__m256i)(B),(int)(C))) +#define _mm256_mask_shrdi_epi16(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(C), \ + (__v16hi)(__m256i)(D), \ + (int)(E), \ + (__v16hi)(__m256i)(A), \ + (__mmask16)(B))) +#define _mm256_maskz_shrdi_epi16(A, B, C, D) \ + ((__m256i) \ + __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(B), \ + (__v16hi)(__m256i)(C),(int)(D), \ + (__v16hi)(__m256i)_mm256_setzero_si256 (), \ + (__mmask16)(A))) +#define _mm256_shrdi_epi32(A, B, C) \ + ((__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B),(int)(C))) +#define _mm256_mask_shrdi_epi32(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(C), \ + (__v8si)(__m256i)(D), \ + (int)(E), \ + (__v8si)(__m256i)(A), \ + (__mmask8)(B))) +#define _mm256_maskz_shrdi_epi32(A, B, C, D) \ + ((__m256i) \ + __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(B), \ + (__v8si)(__m256i)(C),(int)(D), \ + (__v8si)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(A))) +#define _mm256_shrdi_epi64(A, B, C) \ + ((__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B),(int)(C))) +#define _mm256_mask_shrdi_epi64(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(C), \ + (__v4di)(__m256i)(D), (int)(E), \ + (__v4di)(__m256i)(A), \ + (__mmask8)(B))) +#define _mm256_maskz_shrdi_epi64(A, B, C, D) \ + ((__m256i) \ + __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(B), \ + 
(__v4di)(__m256i)(C),(int)(D), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(A))) +#define _mm_shrdi_epi16(A, B, C) \ + ((__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B),(int)(C))) +#define _mm_mask_shrdi_epi16(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(C), \ + (__v8hi)(__m128i)(D), (int)(E), \ + (__v8hi)(__m128i)(A), \ + (__mmask8)(B))) +#define _mm_maskz_shrdi_epi16(A, B, C, D) \ + ((__m128i) \ + __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(B), \ + (__v8hi)(__m128i)(C),(int)(D), \ + (__v8hi)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(A))) +#define _mm_shrdi_epi32(A, B, C) \ + ((__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B),(int)(C))) +#define _mm_mask_shrdi_epi32(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(C), \ + (__v4si)(__m128i)(D), (int)(E), \ + (__v4si)(__m128i)(A), \ + (__mmask8)(B))) +#define _mm_maskz_shrdi_epi32(A, B, C, D) \ + ((__m128i) \ + __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(B), \ + (__v4si)(__m128i)(C),(int)(D), \ + (__v4si)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(A))) +#define _mm_shrdi_epi64(A, B, C) \ + ((__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B),(int)(C))) +#define _mm_mask_shrdi_epi64(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(C), \ + (__v2di)(__m128i)(D), (int)(E), \ + (__v2di)(__m128i)(A), \ + (__mmask8)(B))) +#define _mm_maskz_shrdi_epi64(A, B, C, D) \ + ((__m128i) \ + __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(B), \ + (__v2di)(__m128i)(C),(int)(D), \ + (__v2di)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(A))) +#define _mm256_shldi_epi16(A, B, C) \ + ((__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)(__m256i)(A), \ + (__v16hi)(__m256i)(B),(int)(C))) +#define _mm256_mask_shldi_epi16(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(C), 
\ + (__v16hi)(__m256i)(D), \ + (int)(E), \ + (__v16hi)(__m256i)(A), \ + (__mmask16)(B))) +#define _mm256_maskz_shldi_epi16(A, B, C, D) \ + ((__m256i) \ + __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(B), \ + (__v16hi)(__m256i)(C),(int)(D), \ + (__v16hi)(__m256i)_mm256_setzero_si256 (), \ + (__mmask16)(A))) +#define _mm256_shldi_epi32(A, B, C) \ + ((__m256i) __builtin_ia32_vpshld_v8si ((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B),(int)(C))) +#define _mm256_mask_shldi_epi32(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(C), \ + (__v8si)(__m256i)(D), (int)(E), \ + (__v8si)(__m256i)(A), \ + (__mmask8)(B))) +#define _mm256_maskz_shldi_epi32(A, B, C, D) \ + ((__m256i) \ + __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(B), \ + (__v8si)(__m256i)(C),(int)(D), \ + (__v8si)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(A))) +#define _mm256_shldi_epi64(A, B, C) \ + ((__m256i) __builtin_ia32_vpshld_v4di ((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B),(int)(C))) +#define _mm256_mask_shldi_epi64(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(C), \ + (__v4di)(__m256i)(D), (int)(E), \ + (__v4di)(__m256i)(A), \ + (__mmask8)(B))) +#define _mm256_maskz_shldi_epi64(A, B, C, D) \ + ((__m256i) \ + __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(B), \ + (__v4di)(__m256i)(C),(int)(D), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(A))) +#define _mm_shldi_epi16(A, B, C) \ + ((__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B),(int)(C))) +#define _mm_mask_shldi_epi16(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(C), \ + (__v8hi)(__m128i)(D), (int)(E), \ + (__v8hi)(__m128i)(A), \ + (__mmask8)(B))) +#define _mm_maskz_shldi_epi16(A, B, C, D) \ + ((__m128i) \ + __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(B), \ + (__v8hi)(__m128i)(C),(int)(D), \ + (__v8hi)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(A))) +#define 
_mm_shldi_epi32(A, B, C) \ + ((__m128i) __builtin_ia32_vpshld_v4si ((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B),(int)(C))) +#define _mm_mask_shldi_epi32(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(C), \ + (__v4si)(__m128i)(D), (int)(E), \ + (__v4si)(__m128i)(A), \ + (__mmask8)(B))) +#define _mm_maskz_shldi_epi32(A, B, C, D) \ + ((__m128i) \ + __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(B), \ + (__v4si)(__m128i)(C),(int)(D), \ + (__v4si)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(A))) +#define _mm_shldi_epi64(A, B, C) \ + ((__m128i) __builtin_ia32_vpshld_v2di ((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B),(int)(C))) +#define _mm_mask_shldi_epi64(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(C), \ + (__v2di)(__m128i)(D), (int)(E), \ + (__v2di)(__m128i)(A), \ + (__mmask8)(B))) +#define _mm_maskz_shldi_epi64(A, B, C, D) \ + ((__m128i) \ + __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(B), \ + (__v2di)(__m128i)(C),(int)(D), \ + (__v2di)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(A))) +#endif + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shrdv_epi16 (__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpshrdv_v16hi ((__v16hi)__A, (__v16hi) __B, + (__v16hi) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shrdv_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshrdv_v16hi_mask ((__v16hi)__A, + (__v16hi) __C, (__v16hi) __D, (__mmask16)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shrdv_epi16 (__mmask16 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshrdv_v16hi_maskz ((__v16hi)__B, + (__v16hi) __C, (__v16hi) __D, (__mmask16)__A); +} + +extern __inline __m256i 
+__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shrdv_epi32 (__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpshrdv_v8si ((__v8si)__A, (__v8si) __B, + (__v8si) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shrdv_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshrdv_v8si_mask ((__v8si)__A, (__v8si) __C, + (__v8si) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shrdv_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshrdv_v8si_maskz ((__v8si)__B, (__v8si) __C, + (__v8si) __D, (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shrdv_epi64 (__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpshrdv_v4di ((__v4di)__A, (__v4di) __B, + (__v4di) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shrdv_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshrdv_v4di_mask ((__v4di)__A, (__v4di) __C, + (__v4di) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shrdv_epi64 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshrdv_v4di_maskz ((__v4di)__B, (__v4di) __C, + (__v4di) __D, (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shrdv_epi16 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpshrdv_v8hi ((__v8hi)__A, (__v8hi) __B, + (__v8hi) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_shrdv_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshrdv_v8hi_mask ((__v8hi)__A, (__v8hi) __C, + (__v8hi) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shrdv_epi16 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshrdv_v8hi_maskz ((__v8hi)__B, (__v8hi) __C, + (__v8hi) __D, (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shrdv_epi32 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpshrdv_v4si ((__v4si)__A, (__v4si) __B, + (__v4si) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shrdv_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshrdv_v4si_mask ((__v4si)__A, (__v4si) __C, + (__v4si) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shrdv_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshrdv_v4si_maskz ((__v4si)__B, (__v4si) __C, + (__v4si) __D, (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shrdv_epi64 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpshrdv_v2di ((__v2di)__A, (__v2di) __B, + (__v2di) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shrdv_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshrdv_v2di_mask ((__v2di)__A, (__v2di) __C, + (__v2di) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shrdv_epi64 (__mmask8 __A, __m128i __B, 
__m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshrdv_v2di_maskz ((__v2di)__B, (__v2di) __C, + (__v2di) __D, (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shldv_epi16 (__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpshldv_v16hi ((__v16hi)__A, (__v16hi) __B, + (__v16hi) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shldv_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshldv_v16hi_mask ((__v16hi)__A, + (__v16hi) __C, (__v16hi) __D, (__mmask16)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shldv_epi16 (__mmask16 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshldv_v16hi_maskz ((__v16hi)__B, + (__v16hi) __C, (__v16hi) __D, (__mmask16)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shldv_epi32 (__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpshldv_v8si ((__v8si)__A, (__v8si) __B, + (__v8si) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shldv_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshldv_v8si_mask ((__v8si)__A, (__v8si) __C, + (__v8si) __D, (__mmask8)__B) ; +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shldv_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshldv_v8si_maskz ((__v8si)__B, (__v8si) __C, + (__v8si) __D, (__mmask8)__A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shldv_epi64 (__m256i __A, __m256i __B, __m256i __C) +{ + 
return (__m256i) __builtin_ia32_vpshldv_v4di ((__v4di)__A, (__v4di) __B, + (__v4di) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shldv_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshldv_v4di_mask ((__v4di)__A, (__v4di) __C, + (__v4di) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shldv_epi64 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpshldv_v4di_maskz ((__v4di)__B, (__v4di) __C, + (__v4di) __D, (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shldv_epi16 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpshldv_v8hi ((__v8hi)__A, (__v8hi) __B, + (__v8hi) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shldv_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshldv_v8hi_mask ((__v8hi)__A, (__v8hi) __C, + (__v8hi) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shldv_epi16 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshldv_v8hi_maskz ((__v8hi)__B, (__v8hi) __C, + (__v8hi) __D, (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shldv_epi32 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpshldv_v4si ((__v4si)__A, (__v4si) __B, + (__v4si) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shldv_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshldv_v4si_mask ((__v4si)__A, 
(__v4si) __C, + (__v4si) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shldv_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshldv_v4si_maskz ((__v4si)__B, (__v4si) __C, + (__v4si) __D, (__mmask8)__A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shldv_epi64 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpshldv_v2di ((__v2di)__A, (__v2di) __B, + (__v2di) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shldv_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshldv_v2di_mask ((__v2di)__A, (__v2di) __C, + (__v2di) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shldv_epi64 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpshldv_v2di_maskz ((__v2di)__B, (__v2di) __C, + (__v2di) __D, (__mmask8)__A); +} + + + + +#ifdef __DISABLE_AVX512VBMI2VL__ +#undef __DISABLE_AVX512VBMI2VL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VBMIVL__ */ + +#if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__) || \ + !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("avx512vbmi2,avx512vl,avx512bw") +#define __DISABLE_AVX512VBMI2VLBW__ +#endif /* __AVX512VBMIVLBW__ */ + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compress_epi8 (__m256i __A, __mmask32 __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi)__C, + (__v32qi)__A, (__mmask32)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_compress_epi8 (__mmask32 __A, __m256i __B) +{ + return (__m256i) 
__builtin_ia32_compressqi256_mask ((__v32qi) __B, + (__v32qi) _mm256_setzero_si256 (), (__mmask32) __A); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compressstoreu_epi8 (void * __A, __mmask32 __B, __m256i __C) +{ + __builtin_ia32_compressstoreuqi256_mask ((__v32qi *) __A, (__v32qi) __C, + (__mmask32) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expand_epi8 (__m256i __A, __mmask32 __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __C, + (__v32qi) __A, + (__mmask32) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expand_epi8 (__mmask32 __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_expandqi256_maskz ((__v32qi) __B, + (__v32qi) _mm256_setzero_si256 (), (__mmask32) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expandloadu_epi8 (__m256i __A, __mmask32 __B, const void * __C) +{ + return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *) __C, + (__v32qi) __A, (__mmask32) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expandloadu_epi8 (__mmask32 __A, const void * __B) +{ + return (__m256i) __builtin_ia32_expandloadqi256_maskz ((const __v32qi *) __B, + (__v32qi) _mm256_setzero_si256 (), (__mmask32) __A); +} + +#ifdef __DISABLE_AVX512VBMI2VLBW__ +#undef __DISABLE_AVX512VBMI2VLBW__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VBMIVLBW__ */ + +#endif /* _AVX512VBMIVLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmiintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmiintrin.h new file mode 100644 index 0000000..f296890 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmiintrin.h @@ -0,0 +1,158 @@ +/* Copyright 
(C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512VBMIINTRIN_H_INCLUDED +#define _AVX512VBMIINTRIN_H_INCLUDED + +#ifndef __AVX512VBMI__ +#pragma GCC push_options +#pragma GCC target("avx512vbmi") +#define __DISABLE_AVX512VBMI__ +#endif /* __AVX512VBMI__ */ + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_multishift_epi64_epi8 (__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_multishift_epi64_epi8 (__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_multishift_epi64_epi8 (__m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X, + (__v64qi) __Y, + (__v64qi) + _mm512_undefined_epi32 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutexvar_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B, + (__v64qi) __A, + (__v64qi) + _mm512_undefined_epi32 (), + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B, + (__v64qi) __A, + (__v64qi) + _mm512_setzero_si512(), + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + 
return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B, + (__v64qi) __A, + (__v64qi) __W, + (__mmask64) __M); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_permutex2var_epi8 (__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I + /* idx */ , + (__v64qi) __A, + (__v64qi) __B, + (__mmask64) -1); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_permutex2var_epi8 (__m512i __A, __mmask64 __U, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I + /* idx */ , + (__v64qi) __A, + (__v64qi) __B, + (__mmask64) + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask2_permutex2var_epi8 (__m512i __A, __m512i __I, + __mmask64 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermi2varqi512_mask ((__v64qi) __A, + (__v64qi) __I + /* idx */ , + (__v64qi) __B, + (__mmask64) + __U); +} + +extern __inline __m512i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_permutex2var_epi8 (__mmask64 __U, __m512i __A, + __m512i __I, __m512i __B) +{ + return (__m512i) __builtin_ia32_vpermt2varqi512_maskz ((__v64qi) __I + /* idx */ , + (__v64qi) __A, + (__v64qi) __B, + (__mmask64) + __U); +} + +#ifdef __DISABLE_AVX512VBMI__ +#undef __DISABLE_AVX512VBMI__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VBMI__ */ + +#endif /* _AVX512VBMIINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmivlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmivlintrin.h new file mode 100644 index 0000000..445c5d6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vbmivlintrin.h @@ -0,0 +1,273 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VBMIVLINTRIN_H_INCLUDED +#define _AVX512VBMIVLINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512VBMI__) +#pragma GCC push_options +#pragma GCC target("avx512vbmi,avx512vl") +#define __DISABLE_AVX512VBMIVL__ +#endif /* __AVX512VBMIVL__ */ + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v32qi) __W, + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v32qi) + _mm256_undefined_si256 (), + (__mmask32) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v16qi) __W, + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v16qi) + _mm_undefined_si128 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutexvar_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B, + (__v32qi) __A, + (__v32qi) + _mm256_undefined_si256 (), + (__mmask32) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B, + (__v32qi) __A, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) 
__builtin_ia32_permvarqi256_mask ((__v32qi) __B, + (__v32qi) __A, + (__v32qi) __W, + (__mmask32) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutexvar_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B, + (__v16qi) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask16) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B, + (__v16qi) __A, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B, + (__v16qi) __A, + (__v16qi) __W, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_epi8 (__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I + /* idx */ , + (__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex2var_epi8 (__m256i __A, __mmask32 __U, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I + /* idx */ , + (__v32qi) __A, + (__v32qi) __B, + (__mmask32) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask2_permutex2var_epi8 (__m256i __A, __m256i __I, + __mmask32 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermi2varqi256_mask ((__v32qi) __A, + (__v32qi) __I + /* idx */ , + (__v32qi) __B, + (__mmask32) + __U); +} + +extern 
__inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex2var_epi8 (__mmask32 __U, __m256i __A, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varqi256_maskz ((__v32qi) __I + /* idx */ , + (__v32qi) __A, + (__v32qi) __B, + (__mmask32) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_epi8 (__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I + /* idx */ , + (__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I + /* idx */ , + (__v16qi) __A, + (__v16qi) __B, + (__mmask16) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermi2varqi128_mask ((__v16qi) __A, + (__v16qi) __I + /* idx */ , + (__v16qi) __B, + (__mmask16) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varqi128_maskz ((__v16qi) __I + /* idx */ , + (__v16qi) __A, + (__v16qi) __B, + (__mmask16) + __U); +} + +#ifdef __DISABLE_AVX512VBMIVL__ +#undef __DISABLE_AVX512VBMIVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VBMIVL__ */ + +#endif /* _AVX512VBMIVLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vlbwintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vlbwintrin.h new file mode 100644 index 0000000..192d54e --- /dev/null +++ 
b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vlbwintrin.h @@ -0,0 +1,4758 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VLBWINTRIN_H_INCLUDED +#define _AVX512VLBWINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("avx512vl,avx512bw") +#define __DISABLE_AVX512VLBW__ +#endif /* __AVX512VLBW__ */ + +/* Internal data types for implementing the intrinsics. 
*/ +typedef short __v16hi_u __attribute__ ((__vector_size__ (32), \ + __may_alias__, __aligned__ (1))); +typedef short __v8hi_u __attribute__ ((__vector_size__ (16), \ + __may_alias__, __aligned__ (1))); +typedef char __v32qi_u __attribute__ ((__vector_size__ (32), \ + __may_alias__, __aligned__ (1))); +typedef char __v16qi_u __attribute__ ((__vector_size__ (16), \ + __may_alias__, __aligned__ (1))); + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdquqi256_mask ((__v32qi) __A, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdquqi256_mask ((__v32qi) __A, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdquqi128_mask ((__v16qi) __A, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdquqi128_mask ((__v16qi) __A, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_epi8 (void *__P, __m256i __A) +{ + *(__v32qi_u *) __P = (__v32qi_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A) +{ + __builtin_ia32_storedquqi256_mask ((char *) __P, + (__v32qi) __A, + (__mmask32) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_storeu_epi8 (void *__P, __m128i __A) +{ + *(__v16qi_u *) __P = (__v16qi_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A) +{ + __builtin_ia32_storedquqi128_mask ((char *) __P, + (__v16qi) __A, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_epi16 (void const *__P) +{ + return (__m256i) (*(__v16hi_u *) __P); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((const short *) __P, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((const short *) __P, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_epi16 (void const *__P) +{ + return (__m128i) (*(__v8hi_u *) __P); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((const short *) __P, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((const short *) __P, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mov_epi16 
(__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdquhi256_mask ((__v16hi) __A, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdquhi256_mask ((__v16hi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdquhi128_mask ((__v8hi) __A, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdquhi128_mask ((__v8hi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_epi8 (void const *__P) +{ + return (__m256i) (*(__v32qi_u *) __P); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((const char *) __P, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((const char *) __P, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_epi8 (void const *__P) +{ + return (__m128i) (*(__v16qi_u *) __P); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((const char *) __P, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((const char *) __P, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi16_epi8 (__m256i __A) +{ + + return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M,__m256i __A) +{ + __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P , (__v16hi) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsepi16_epi8 (__m128i __A) +{ + + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M,__m128i __A) +{ + __builtin_ia32_pmovswb128mem_mask ((unsigned long long *) __P , (__v8hi) 
__A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsepi16_epi8 (__m256i __A) +{ + + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M,__m256i __A) +{ + __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P , (__v16hi) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtusepi16_epi8 (__m128i __A) +{ + + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M,__m128i __A) 
+{ + __builtin_ia32_pmovuswb128mem_mask ((unsigned long long *) __P , (__v8hi) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtusepi16_epi8 (__m256i __A) +{ + + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M,__m256i __A) +{ + __builtin_ia32_pmovuswb256mem_mask ((__v16qi *) __P , (__v16hi) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastb256_mask ((__v16qi) __A, + (__v32qi) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastb256_mask ((__v16qi) __A, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A, + (__v32qi) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_set1_epi8 (__mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_pbroadcastb256_gpr_mask (__A, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastb128_mask ((__v16qi) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastb128_mask ((__v16qi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_set1_epi8 (__mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_pbroadcastb128_gpr_mask (__A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A) +{ + return (__m256i) 
__builtin_ia32_pbroadcastw256_mask ((__v8hi) __A, + (__v16hi) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastw256_mask ((__v8hi) __A, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A, + (__v16hi) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_set1_epi16 (__mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_pbroadcastw256_gpr_mask (__A, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastw128_mask ((__v8hi) __A, + (__v8hi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastw128_mask ((__v8hi) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A, + (__v8hi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_set1_epi16 (__mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_pbroadcastw128_gpr_mask (__A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_permutexvar_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B, + (__v16hi) __A, + (__v16hi) __W, + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutexvar_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B, + (__v8hi) __A, + (__v8hi) __W, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_epi16 (__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I + /* idx 
*/ , + (__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex2var_epi16 (__m256i __A, __mmask16 __U, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I + /* idx */ , + (__v16hi) __A, + (__v16hi) __B, + (__mmask16) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask2_permutex2var_epi16 (__m256i __A, __m256i __I, + __mmask16 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A, + (__v16hi) __I + /* idx */ , + (__v16hi) __B, + (__mmask16) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varhi256_maskz ((__v16hi) __I + /* idx */ , + (__v16hi) __A, + (__v16hi) __B, + (__mmask16) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_epi16 (__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I + /* idx */ , + (__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutex2var_epi16 (__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I + /* idx */ , + (__v8hi) __A, + (__v8hi) __B, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask2_permutex2var_epi16 (__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A, + (__v8hi) __I + /* idx */ , + (__v8hi) __B, + (__mmask8) + __U); 
+} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varhi128_maskz ((__v8hi) __I + /* idx */ , + (__v8hi) __A, + (__v8hi) __B, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X, + (__v32qi) __Y, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X, + (__v16qi) __Y, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_madd_epi16 (__mmask8 __U, 
__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi8_mask (__m128i __A) +{ + return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movepi8_mask (__m256i __A) +{ + return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi16_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movepi16_mask (__m256i __A) +{ + return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movm_epi8 (__mmask16 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2b128 (__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movm_epi8 (__mmask32 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2b256 (__A); +} + +extern __inline __m128i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movm_epi16 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2w128 (__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movm_epi16 (__mmask16 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2w256 (__A); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_test_epi8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestmb128 ((__v16qi) __A, + (__v16qi) __B, __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_test_epi8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestmb256 ((__v32qi) __A, + (__v32qi) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_test_epi16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmw128 ((__v8hi) __A, + (__v8hi) __B, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_test_epi16_mask (__m256i __A, __m256i 
__B) +{ + return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestmw256 ((__v16hi) __A, + (__v16hi) __B, __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epi16 
(__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __M); +} + 
+extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i) 
__builtin_ia32_pminsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) 
__W, + (__mmask8) __M); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_alignr_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B, const int __N) +{ + return (__m256i) __builtin_ia32_palignr256_mask ((__v4di) __A, + (__v4di) __B, + __N * 8, + (__v4di) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_alignr_epi8 (__mmask32 __U, __m256i __A, __m256i __B, + const int __N) +{ + return (__m256i) __builtin_ia32_palignr256_mask ((__v4di) __A, + (__v4di) __B, + __N * 8, + (__v4di) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_alignr_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B, const int __N) +{ + return (__m128i) __builtin_ia32_palignr128_mask ((__v2di) __A, + (__v2di) __B, + __N * 8, + (__v2di) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_alignr_epi8 (__mmask16 __U, __m128i __A, __m128i __B, + const int __N) +{ + return (__m128i) __builtin_ia32_palignr128_mask ((__v2di) __A, + (__v2di) __B, + __N * 8, + (__v2di) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dbsad_epu8 (__m256i __A, __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi) __A, + (__v32qi) __B, + __imm, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_dbsad_epu8 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi) __A, + (__v32qi) __B, + __imm, + (__v16hi) __W, + 
(__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_dbsad_epu8 (__mmask16 __U, __m256i __A, __m256i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi) __A, + (__v32qi) __B, + __imm, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dbsad_epu8 (__m128i __A, __m128i __B, const int __imm) +{ + return (__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi) __A, + (__v16qi) __B, + __imm, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_dbsad_epu8 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B, const int __imm) +{ + return (__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi) __A, + (__v16qi) __B, + __imm, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_dbsad_epu8 (__mmask8 __U, __m128i __A, __m128i __B, + const int __imm) +{ + return (__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi) __A, + (__v16qi) __B, + __imm, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_epi16 (__mmask16 __U, 
__m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epi16_mask (__mmask8 __U, __m128i __X, __m128i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epi16_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epi16_mask (__mmask16 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, __P, + (__mmask16) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epi16_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, __P, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epi8_mask (__mmask16 __U, __m128i __X, __m128i __Y, + const int __P) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, __P, + (__mmask16) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epi8_mask (__m128i __X, __m128i __Y, const int __P) +{ + 
return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, __P, + (__mmask16) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epi8_mask (__mmask32 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, __P, + (__mmask32) __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epi8_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, __P, + (__mmask32) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epu16_mask (__mmask8 __U, __m128i __X, __m128i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epu16_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epu16_mask (__mmask16 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, __P, + (__mmask16) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epu16_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, __P, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epu8_mask (__mmask16 __U, __m128i __X, __m128i __Y, + const int __P) +{ + 
return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, __P, + (__mmask16) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epu8_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, __P, + (__mmask16) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epu8_mask (__mmask32 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, __P, + (__mmask32) __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epu8_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, __P, + (__mmask32) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srli_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi) __A, __imm, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srli_epi16 (__mmask16 __U, __m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi) __A, __imm, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srli_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi) __A, __imm, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, const int 
__imm) +{ + return (__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi) __A, __imm, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shufflehi_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi) __A, + __imm, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shufflehi_epi16 (__mmask16 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi) __A, + __imm, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shufflehi_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi) __A, __imm, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shufflehi_epi16 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi) __A, __imm, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shufflelo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi) __A, + __imm, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shufflelo_epi16 (__mmask16 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi) __A, + __imm, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shufflelo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi) __A, __imm, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shufflelo_epi16 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi) __A, __imm, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srai_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_psrawi256_mask ((__v16hi) __A, __imm, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srai_epi16 (__mmask16 __U, __m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psrawi256_mask ((__v16hi) __A, __imm, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srai_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_psrawi128_mask ((__v8hi) __A, __imm, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srai_epi16 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_psrawi128_mask ((__v8hi) __A, __imm, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_slli_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + int __B) +{ + return (__m256i) __builtin_ia32_psllwi256_mask ((__v16hi) __A, __B, + 
(__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_slli_epi16 (__mmask16 __U, __m256i __A, int __B) +{ + return (__m256i) __builtin_ia32_psllwi256_mask ((__v16hi) __A, __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_slli_epi16 (__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i) __builtin_ia32_psllwi128_mask ((__v8hi) __A, __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i) __builtin_ia32_psllwi128_mask ((__v8hi) __A, __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +#else +#define _mm256_mask_alignr_epi8(W, U, X, Y, N) \ + ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)((N) * 8), \ + (__v4di)(__m256i)(X), (__mmask32)(U))) + +#define _mm256_mask_srli_epi16(W, U, A, B) \ + ((__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi)(__m256i)(A), \ + (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U))) + +#define _mm256_maskz_srli_epi16(U, A, B) \ + ((__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi)(__m256i)(A), \ + (int)(B), (__v16hi)_mm256_setzero_si256 (), (__mmask16)(U))) + +#define _mm_mask_srli_epi16(W, U, A, B) \ + ((__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi)(__m128i)(A), \ + (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U))) + +#define _mm_maskz_srli_epi16(U, A, B) \ + ((__m128i) __builtin_ia32_psrlwi128_mask ((__v8hi)(__m128i)(A), \ + (int)(B), (__v8hi)_mm_setzero_si128(), (__mmask8)(U))) + +#define _mm256_mask_srai_epi16(W, U, A, B) \ + ((__m256i) __builtin_ia32_psrawi256_mask ((__v16hi)(__m256i)(A), \ + (int)(B), (__v16hi)(__m256i)(W), (__mmask16)(U))) + +#define _mm256_maskz_srai_epi16(U, A, B) \ + 
((__m256i) __builtin_ia32_psrawi256_mask ((__v16hi)(__m256i)(A), \ + (int)(B), (__v16hi)_mm256_setzero_si256 (), (__mmask16)(U))) + +#define _mm_mask_srai_epi16(W, U, A, B) \ + ((__m128i) __builtin_ia32_psrawi128_mask ((__v8hi)(__m128i)(A), \ + (int)(B), (__v8hi)(__m128i)(W), (__mmask8)(U))) + +#define _mm_maskz_srai_epi16(U, A, B) \ + ((__m128i) __builtin_ia32_psrawi128_mask ((__v8hi)(__m128i)(A), \ + (int)(B), (__v8hi)_mm_setzero_si128(), (__mmask8)(U))) + +#define _mm256_mask_shufflehi_epi16(W, U, A, B) \ + ((__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi)(__m256i)(A), (int)(B), \ + (__v16hi)(__m256i)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_shufflehi_epi16(U, A, B) \ + ((__m256i) __builtin_ia32_pshufhw256_mask ((__v16hi)(__m256i)(A), (int)(B), \ + (__v16hi)(__m256i)_mm256_setzero_si256 (), \ + (__mmask16)(U))) + +#define _mm_mask_shufflehi_epi16(W, U, A, B) \ + ((__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi)(__m128i)(A), (int)(B), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_shufflehi_epi16(U, A, B) \ + ((__m128i) __builtin_ia32_pshufhw128_mask ((__v8hi)(__m128i)(A), (int)(B), \ + (__v8hi)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(U))) + +#define _mm256_mask_shufflelo_epi16(W, U, A, B) \ + ((__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi)(__m256i)(A), (int)(B), \ + (__v16hi)(__m256i)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_shufflelo_epi16(U, A, B) \ + ((__m256i) __builtin_ia32_pshuflw256_mask ((__v16hi)(__m256i)(A), (int)(B), \ + (__v16hi)(__m256i)_mm256_setzero_si256 (), \ + (__mmask16)(U))) + +#define _mm_mask_shufflelo_epi16(W, U, A, B) \ + ((__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi)(__m128i)(A), (int)(B), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_shufflelo_epi16(U, A, B) \ + ((__m128i) __builtin_ia32_pshuflw128_mask ((__v8hi)(__m128i)(A), (int)(B), \ + (__v8hi)(__m128i)_mm_setzero_si128 (), \ + (__mmask8)(U))) + +#define _mm256_maskz_alignr_epi8(U, X, Y, N) \ + ((__m256i) 
__builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)((N) * 8), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask32)(U))) + +#define _mm_mask_alignr_epi8(W, U, X, Y, N) \ + ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)((N) * 8), \ + (__v2di)(__m128i)(X), (__mmask16)(U))) + +#define _mm_maskz_alignr_epi8(U, X, Y, N) \ + ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)((N) * 8), \ + (__v2di)(__m128i)_mm_setzero_si128 (), \ + (__mmask16)(U))) + +#define _mm_mask_slli_epi16(W, U, X, C) \ + ((__m128i)__builtin_ia32_psllwi128_mask ((__v8hi)(__m128i)(X), (int)(C),\ + (__v8hi)(__m128i)(W),\ + (__mmask8)(U))) + +#define _mm_maskz_slli_epi16(U, X, C) \ + ((__m128i)__builtin_ia32_psllwi128_mask ((__v8hi)(__m128i)(X), (int)(C),\ + (__v8hi)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm256_dbsad_epu8(X, Y, C) \ + ((__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi)(__m256i) (X), \ + (__v32qi)(__m256i) (Y), (int) (C), \ + (__v16hi)(__m256i)_mm256_setzero_si256(),\ + (__mmask16)-1)) + +#define _mm256_mask_slli_epi16(W, U, X, C) \ + ((__m256i)__builtin_ia32_psllwi256_mask ((__v16hi)(__m256i)(X), (int)(C),\ + (__v16hi)(__m256i)(W),\ + (__mmask16)(U))) + +#define _mm256_maskz_slli_epi16(U, X, C) \ + ((__m256i)__builtin_ia32_psllwi256_mask ((__v16hi)(__m256i)(X), (int)(C),\ + (__v16hi)(__m256i)_mm256_setzero_si256 (),\ + (__mmask16)(U))) + +#define _mm256_mask_dbsad_epu8(W, U, X, Y, C) \ + ((__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi)(__m256i) (X), \ + (__v32qi)(__m256i) (Y), (int) (C), \ + (__v16hi)(__m256i)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_dbsad_epu8(U, X, Y, C) \ + ((__m256i) __builtin_ia32_dbpsadbw256_mask ((__v32qi)(__m256i) (X), \ + (__v32qi)(__m256i) (Y), (int) (C), \ + (__v16hi)(__m256i)_mm256_setzero_si256(),\ + (__mmask16)(U))) + +#define _mm_dbsad_epu8(X, Y, C) \ + ((__m128i) 
__builtin_ia32_dbpsadbw128_mask ((__v16qi)(__m128i) (X), \ + (__v16qi)(__m128i) (Y), (int) (C), \ + (__v8hi)(__m128i)_mm_setzero_si128(), \ + (__mmask8)-1)) + +#define _mm_mask_dbsad_epu8(W, U, X, Y, C) \ + ((__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi)(__m128i) (X), \ + (__v16qi)(__m128i) (Y), (int) (C), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_dbsad_epu8(U, X, Y, C) \ + ((__m128i) __builtin_ia32_dbpsadbw128_mask ((__v16qi)(__m128i) (X), \ + (__v16qi)(__m128i) (Y), (int) (C), \ + (__v8hi)(__m128i)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm_mask_blend_epi16(__U, __A, __W) \ + ((__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) (__A), \ + (__v8hi) (__W), \ + (__mmask8) (__U))) + +#define _mm_mask_blend_epi8(__U, __A, __W) \ + ((__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) (__A), \ + (__v16qi) (__W), \ + (__mmask16) (__U))) + +#define _mm256_mask_blend_epi16(__U, __A, __W) \ + ((__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) (__A), \ + (__v16hi) (__W), \ + (__mmask16) (__U))) + +#define _mm256_mask_blend_epi8(__U, __A, __W) \ + ((__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) (__A), \ + (__v32qi) (__W), \ + (__mmask32) (__U))) + +#define _mm_cmp_epi16_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi)(__m128i)(X), \ + (__v8hi)(__m128i)(Y), (int)(P),\ + (__mmask8)(-1))) + +#define _mm_cmp_epi8_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(P),\ + (__mmask16)(-1))) + +#define _mm256_cmp_epi16_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi)(__m256i)(X), \ + (__v16hi)(__m256i)(Y), (int)(P),\ + (__mmask16)(-1))) + +#define _mm256_cmp_epi8_mask(X, Y, P) \ + ((__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(P),\ + (__mmask32)(-1))) + +#define _mm_cmp_epu16_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi)(__m128i)(X), \ + 
(__v8hi)(__m128i)(Y), (int)(P),\ + (__mmask8)(-1))) + +#define _mm_cmp_epu8_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(P),\ + (__mmask16)(-1))) + +#define _mm256_cmp_epu16_mask(X, Y, P) \ + ((__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi)(__m256i)(X), \ + (__v16hi)(__m256i)(Y), (int)(P),\ + (__mmask16)(-1))) + +#define _mm256_cmp_epu8_mask(X, Y, P) \ + ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(P),\ + (__mmask32)-1)) + +#define _mm_mask_cmp_epi16_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi)(__m128i)(X), \ + (__v8hi)(__m128i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_epi8_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(P),\ + (__mmask16)(M))) + +#define _mm256_mask_cmp_epi16_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi)(__m256i)(X), \ + (__v16hi)(__m256i)(Y), (int)(P),\ + (__mmask16)(M))) + +#define _mm256_mask_cmp_epi8_mask(M, X, Y, P) \ + ((__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(P),\ + (__mmask32)(M))) + +#define _mm_mask_cmp_epu16_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi)(__m128i)(X), \ + (__v8hi)(__m128i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_epu8_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(P),\ + (__mmask16)(M))) + +#define _mm256_mask_cmp_epu16_mask(M, X, Y, P) \ + ((__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi)(__m256i)(X), \ + (__v16hi)(__m256i)(Y), (int)(P),\ + (__mmask16)(M))) + +#define _mm256_mask_cmp_epu8_mask(M, X, Y, P) \ + ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(P),\ + (__mmask32)(M))) +#endif + +extern __inline __mmask32 + __attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epi8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 4, + (__mmask32) -1); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epi8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 1, + (__mmask32) -1); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epi8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 5, + (__mmask32) -1); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epi8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 2, + (__mmask32) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epi16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 4, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epi16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 1, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epi16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 5, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epi16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, 
+ (__v16hi) __Y, 2, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epu8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 4, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epu8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 1, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epu8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 5, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epu8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 2, + (__mmask16) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epu16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epu16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epu16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epu16_mask (__m128i __X, __m128i __Y) +{ + return 
(__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epi8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 4, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 1, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epi8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 5, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epi8_mask (__m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 2, + (__mmask16) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epi16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epi16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_cmple_epi16_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 2, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X, + (__v16hi) __Y, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X, + (__v16hi) __Y, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) 
__U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X, + (__v8hi) __Y, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X, + (__v8hi) __Y, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return 
(__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi8_epi16 (__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi8_epi16 (__mmask16 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi8_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi8_epi16 (__mmask8 __U, __m128i __A) +{ 
+ return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu8_epi16 (__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu8_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu8_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A, + 
__m128i __B) +{ + return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A, + 
(__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A, + 
(__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_epi16 (__mmask8 __U, 
__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpackhi_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpackhi_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhbw256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpackhi_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpackhi_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhbw128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpackhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpackhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhwd256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + 
_mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpackhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpackhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhwd128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpacklo_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpacklo_epi8 (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpcklbw256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpacklo_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpacklo_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpcklbw128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_unpacklo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpacklo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpcklwd256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpacklo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpacklo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpcklwd128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_pcmpeqb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epu8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __A, + (__v16qi) __B, 0, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epu8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __A, + (__v16qi) __B, 0, + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask_cmpeq_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_pcmpeqb128_mask ((__v16qi) __A, + (__v16qi) __B, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epu8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __A, + (__v32qi) __B, 0, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_pcmpeqb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epu8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __A, + (__v32qi) __B, 0, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_pcmpeqb256_mask ((__v32qi) __A, + (__v32qi) __B, + __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epu16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __A, + (__v8hi) __B, 0, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epu16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __A, + (__v8hi) __B, 0, 
__U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqw128_mask ((__v8hi) __A, + (__v8hi) __B, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epu16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __A, + (__v16hi) __B, 0, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_pcmpeqw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epu16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __A, + (__v16hi) __B, 0, + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_pcmpeqw256_mask ((__v16hi) __A, + (__v16hi) __B, + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epu8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __A, + (__v16qi) __B, 6, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_pcmpgtb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpgt_epu8_mask (__mmask16 __U, __m128i __A, 
__m128i __B) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __A, + (__v16qi) __B, 6, + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpgt_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_pcmpgtb128_mask ((__v16qi) __A, + (__v16qi) __B, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epu8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __A, + (__v32qi) __B, 6, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_pcmpgtb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epu8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __A, + (__v32qi) __B, 6, + __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_pcmpgtb256_mask ((__v32qi) __A, + (__v32qi) __B, + __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epu16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __A, + (__v8hi) __B, 6, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_cmpgt_epu16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __A, + (__v8hi) __B, 6, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpgt_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtw128_mask ((__v8hi) __A, + (__v8hi) __B, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epu16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __A, + (__v16hi) __B, 6, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_pcmpgtw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epu16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __A, + (__v16hi) __B, 6, + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_pcmpgtw256_mask ((__v16hi) __A, + (__v16hi) __B, + __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testn_epi8_mask (__m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) __A, + (__v16qi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmb128 ((__v16qi) 
__A, + (__v16qi) __B, __U); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testn_epi8_mask (__m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A, + (__v32qi) __B, + (__mmask32) -1); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_ptestnmb256 ((__v32qi) __A, + (__v32qi) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testn_epi16_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A, + (__v8hi) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmw128 ((__v8hi) __A, + (__v8hi) __B, __U); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testn_epi16_mask (__m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A, + (__v16hi) __B, + (__mmask16) -1); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__mmask16) __builtin_ia32_ptestnmw256 ((__v16hi) __A, + (__v16hi) __B, __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_epi8 
(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v32qi) __W, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v16qi) __W, + __M); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v32qi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v32qi) __W, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v16qi) __W, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A, + (__v32qi) __W, + (__mmask32) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epu8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 4, + (__mmask32) -1); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epu8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 1, + (__mmask32) -1); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epu8_mask (__m256i __X, __m256i __Y) +{ + 
return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 5, + (__mmask32) -1); +} + +extern __inline __mmask32 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epu8_mask (__m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 2, + (__mmask32) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epu16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 4, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epu16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 1, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epu16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 5, + (__mmask16) -1); +} + +extern __inline __mmask16 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epu16_mask (__m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 2, + (__mmask16) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_epi16 (void *__P, __m256i __A) +{ + *(__v16hi_u *) __P = (__v16hi_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A) +{ + __builtin_ia32_storedquhi256_mask ((short *) __P, + (__v16hi) __A, + (__mmask16) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_epi16 (void *__P, __m128i __A) +{ + 
*(__v8hi_u *) __P = (__v8hi_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedquhi128_mask ((short *) __P, + (__v8hi) __A, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) 
__builtin_ia32_psubusb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srl_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A, + (__v8hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srl_epi16 (__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A, + (__v8hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srl_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sra_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A, + (__v8hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sra_epi16 (__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A, + (__v8hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sra_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sra_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + 
+extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubw128_mask 
((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi16_epi8 (__m128i __A) +{ + + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M,__m128i __A) +{ + __builtin_ia32_pmovwb128mem_mask ((unsigned long long *) __P , (__v8hi) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srav_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); 
+} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srav_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srav_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srav_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srav_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srav_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srlv_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srlv_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psrlv16hi_mask 
((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srlv_epi16 (__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srlv_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srlv_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srlv_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sllv_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sllv_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sllv_epi16 (__mmask16 __U, __m256i __A, __m256i 
__B) +{ + return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sllv_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sllv_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sllv_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sll_epi16 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sll_epi16 (__m256i __W, __mmask16 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A, + (__v8hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_maskz_sll_epi16 (__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A, + (__v8hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A, + (__v8si) __B, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A, + (__v8si) __B, + (__v16hi) __W, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A, + (__v4si) __B, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_packus_epi32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A, + (__v4si) __B, + (__v8hi) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A, + (__v8si) __B, + (__v16hi) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A, + (__v8si) __B, + (__v16hi) __W, + __M); +} + +extern 
__inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A, + (__v4si) __B, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_packs_epi32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A, + (__v4si) __B, + (__v8hi) __W, __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epu8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 4, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epu8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 1, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epu8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 5, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epu8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 2, + (__mmask16) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epu16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epu16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epu16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epu16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epi8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 4, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epi8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 1, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epi8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 5, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epi8_mask (__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__mmask16) __builtin_ia32_cmpb128_mask ((__v16qi) __X, + (__v16qi) __Y, 2, + (__mmask16) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_cmpneq_epi16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epi16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epi16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epi16_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpw128_mask ((__v8hi) __X, + (__v8hi) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epu8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 4, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epu8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 1, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epu8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 5, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epu8_mask (__mmask32 __M, __m256i __X, __m256i 
__Y) +{ + return (__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 2, + (__mmask32) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epu16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 4, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epu16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 1, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epu16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 5, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epu16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_ucmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 2, + (__mmask16) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epi8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 4, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epi8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 1, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epi8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) 
__builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 5, + (__mmask32) __M); +} + +extern __inline __mmask32 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epi8_mask (__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__mmask32) __builtin_ia32_cmpb256_mask ((__v32qi) __X, + (__v32qi) __Y, 2, + (__mmask32) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epi16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 4, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epi16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 1, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epi16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 5, + (__mmask16) __M); +} + +extern __inline __mmask16 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epi16_mask (__mmask16 __M, __m256i __X, __m256i __Y) +{ + return (__mmask16) __builtin_ia32_cmpw256_mask ((__v16hi) __X, + (__v16hi) __Y, 2, + (__mmask16) __M); +} + +#ifdef __DISABLE_AVX512VLBW__ +#undef __DISABLE_AVX512VLBW__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VLBW__ */ + +#endif /* _AVX512VLBWINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vldqintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vldqintrin.h new file mode 100644 index 0000000..c40bd6e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vldqintrin.h @@ -0,0 +1,2016 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. 
+ + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VLDQINTRIN_H_INCLUDED +#define _AVX512VLDQINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512DQ__) +#pragma GCC push_options +#pragma GCC target("avx512vl,avx512dq") +#define __DISABLE_AVX512VLDQ__ +#endif /* __AVX512VLDQ__ */ + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttpd_epi64 (__m256d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) 
__A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_epi64 (__m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttpd_epu64 (__m256d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_epu64 (__m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_epi64 (__m256d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_epi64 (__m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) +{ + return (__m128i) 
__builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_epu64 (__m256d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_epu64 (__m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttps_epi64 (__m128 __A) +{ + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_epi64 (__m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttps_epu64 (__m128 __A) +{ + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) 
+{ + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_epu64 (__m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_f64x2 (__m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) + __A, + (__v4df)_mm256_undefined_pd(), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcast_f64x2 (__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) + __A, + (__v4df) + __O, __M); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastf64x2_256_mask ((__v2df) + __A, + (__v4df) + _mm256_setzero_ps (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_i64x2 (__m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) + __A, + (__v4di)_mm256_undefined_si256(), + 
(__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcast_i64x2 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) + __A, + (__v4di) + __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti64x2_256_mask ((__v2di) + __A, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_f32x2 (__m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A, + (__v8sf)_mm256_undefined_ps(), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A, + (__v8sf) __O, + __M); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x2_256_mask ((__v4sf) __A, + (__v8sf) + _mm256_setzero_ps (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_i32x2 (__m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) + __A, + (__v8si)_mm256_undefined_si256(), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) + __A, + (__v8si) + __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x2_256_mask ((__v4si) + __A, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcast_i32x2 (__m128i __A) +{ + return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) + __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) + __A, + (__v4si) + __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_broadcasti32x2_128_mask ((__v4si) + __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mullo_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du) __A * (__v4du) __B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du) __A * (__v2du) 
__B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) +{ + return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) +{ + return (__m256) 
__builtin_ia32_andnps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_epi64 (__m128 __A) +{ + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_epi64 (__m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) + 
_mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_epu64 (__m128 __A) +{ + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) +{ + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_epu64 (__m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi64_ps (__m256i __A) +{ + return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) +{ + return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) +{ + return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi64_ps (__m128i __A) +{ + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu64_ps (__m256i __A) +{ + return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + 
+extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) +{ + return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) +{ + return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu64_ps (__m128i __A) +{ + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi64_pd (__m256i __A) +{ + return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) +{ + return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) +{ + 
return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi64_pd (__m128i __A) +{ + return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu64_pd (__m256i __A) +{ + return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) +{ + return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) +{ + return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); 
+} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps 
(), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu64_pd (__m128i __A) +{ + return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline 
__m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + 
+extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movm_epi32 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2d128 (__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movm_epi32 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2d256 (__A); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movm_epi64 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2q128 (__A); +} + +extern 
__inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movm_epi64 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2q256 (__A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi32_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movepi32_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi64_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movepi64_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf64x2_pd (__m256d __A, const int __imm) +{ + return (__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df) __A, + __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_extractf64x2_pd (__m128d __W, __mmask8 __U, __m256d __A, + const int __imm) +{ + return (__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df) __A, + __imm, + (__v2df) __W, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_extractf64x2_pd (__mmask8 __U, __m256d __A, + const int __imm) +{ + return (__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df) __A, + __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_extracti64x2_epi64 (__m256i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di) __A, + __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_extracti64x2_epi64 (__m128i __W, __mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di) __A, + __imm, + (__v2di) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_extracti64x2_epi64 (__mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di) __A, + __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_pd (__m256d __A, int __B) +{ + return (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_reduce_pd (__m256d __W, __mmask8 __U, __m256d __A, int __B) +{ + return (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_reduce_pd (__mmask8 __U, __m256d __A, int __B) +{ + return (__m256d) __builtin_ia32_reducepd256_mask ((__v4df) __A, __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_pd (__m128d __A, int __B) +{ + return (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_reduce_pd (__m128d __W, __mmask8 __U, __m128d __A, int __B) +{ + return (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_pd (__mmask8 __U, __m128d __A, int __B) +{ + return (__m128d) __builtin_ia32_reducepd128_mask ((__v2df) __A, __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_reduce_ps (__m256 __A, int __B) +{ + return (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_reduce_ps (__m256 __W, __mmask8 __U, __m256 __A, int __B) +{ + return (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_reduce_ps (__mmask8 __U, __m256 __A, int __B) +{ + return (__m256) __builtin_ia32_reduceps256_mask ((__v8sf) __A, __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_reduce_ps (__m128 __A, int __B) +{ + return (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_reduce_ps (__m128 __W, __mmask8 __U, __m128 __A, int __B) +{ + return (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_reduce_ps (__mmask8 __U, __m128 
__A, int __B) +{ + return (__m128) __builtin_ia32_reduceps128_mask ((__v4sf) __A, __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_range_pd (__m256d __A, __m256d __B, int __C) +{ + return (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, + (__v4df) __B, __C, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_range_pd (__m256d __W, __mmask8 __U, + __m256d __A, __m256d __B, int __C) +{ + return (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, + (__v4df) __B, __C, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_range_pd (__mmask8 __U, __m256d __A, __m256d __B, int __C) +{ + return (__m256d) __builtin_ia32_rangepd256_mask ((__v4df) __A, + (__v4df) __B, __C, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_range_pd (__m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_range_pd (__m128d __W, __mmask8 __U, + __m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_range_pd (__mmask8 __U, __m128d __A, __m128d __B, int __C) +{ + return (__m128d) __builtin_ia32_rangepd128_mask ((__v2df) __A, + (__v2df) __B, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_range_ps (__m256 __A, __m256 __B, int __C) +{ + return (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, + (__v8sf) __B, __C, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_range_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B, + int __C) +{ + return (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, + (__v8sf) __B, __C, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_range_ps (__mmask8 __U, __m256 __A, __m256 __B, int __C) +{ + return (__m256) __builtin_ia32_rangeps256_mask ((__v8sf) __A, + (__v8sf) __B, __C, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_range_ps (__m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_range_ps (__m128 __W, __mmask8 __U, + __m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_range_ps (__mmask8 __U, __m128 __A, __m128 __B, int __C) +{ + return (__m128) __builtin_ia32_rangeps128_mask ((__v4sf) __A, + (__v4sf) __B, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fpclass_pd_mask (__mmask8 __U, __m256d __A, + const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasspd256_mask 
((__v4df) __A, + __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fpclass_pd_mask (__m256d __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) __A, + __imm, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fpclass_ps_mask (__mmask8 __U, __m256 __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) __A, + __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fpclass_ps_mask (__m256 __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) __A, + __imm, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fpclass_pd_mask (__mmask8 __U, __m128d __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) __A, + __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fpclass_pd_mask (__m128d __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) __A, + __imm, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fpclass_ps_mask (__mmask8 __U, __m128 __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) __A, + __imm, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fpclass_ps_mask (__m128 __A, const int __imm) +{ + return (__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) __A, + __imm, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_inserti64x2 (__m256i __A, __m128i __B, const int __imm) +{ + return (__m256i) 
__builtin_ia32_inserti64x2_256_mask ((__v4di) __A, + (__v2di) __B, + __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_inserti64x2 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di) __A, + (__v2di) __B, + __imm, + (__v4di) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_inserti64x2 (__mmask8 __U, __m256i __A, __m128i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di) __A, + (__v2di) __B, + __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf64x2 (__m256d __A, __m128d __B, const int __imm) +{ + return (__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df) __A, + (__v2df) __B, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_insertf64x2 (__m256d __W, __mmask8 __U, __m256d __A, + __m128d __B, const int __imm) +{ + return (__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df) __A, + (__v2df) __B, + __imm, + (__v4df) __W, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_insertf64x2 (__mmask8 __U, __m256d __A, __m128d __B, + const int __imm) +{ + return (__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df) __A, + (__v2df) __B, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) + __U); +} + +#else +#define _mm256_insertf64x2(X, Y, C) \ + ((__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df)(__m256d) (X),\ + (__v2df)(__m128d) (Y), (int) (C), \ + (__v4df)(__m256d)_mm256_setzero_pd(), \ + 
(__mmask8)-1)) + +#define _mm256_mask_insertf64x2(W, U, X, Y, C) \ + ((__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df)(__m256d) (X),\ + (__v2df)(__m128d) (Y), (int) (C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_insertf64x2(U, X, Y, C) \ + ((__m256d) __builtin_ia32_insertf64x2_256_mask ((__v4df)(__m256d) (X),\ + (__v2df)(__m128d) (Y), (int) (C), \ + (__v4df)(__m256d)_mm256_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm256_inserti64x2(X, Y, C) \ + ((__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di)(__m256i) (X),\ + (__v2di)(__m128i) (Y), (int) (C), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)-1)) + +#define _mm256_mask_inserti64x2(W, U, X, Y, C) \ + ((__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di)(__m256i) (X),\ + (__v2di)(__m128i) (Y), (int) (C), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_inserti64x2(U, X, Y, C) \ + ((__m256i) __builtin_ia32_inserti64x2_256_mask ((__v4di)(__m256i) (X),\ + (__v2di)(__m128i) (Y), (int) (C), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm256_extractf64x2_pd(X, C) \ + ((__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df)(__m256d) (X),\ + (int) (C), (__v2df)(__m128d) _mm_setzero_pd(), (__mmask8)-1)) + +#define _mm256_mask_extractf64x2_pd(W, U, X, C) \ + ((__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df)(__m256d) (X),\ + (int) (C), (__v2df)(__m128d) (W), (__mmask8) (U))) + +#define _mm256_maskz_extractf64x2_pd(U, X, C) \ + ((__m128d) __builtin_ia32_extractf64x2_256_mask ((__v4df)(__m256d) (X),\ + (int) (C), (__v2df)(__m128d) _mm_setzero_pd(), (__mmask8) (U))) + +#define _mm256_extracti64x2_epi64(X, C) \ + ((__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di)(__m256i) (X),\ + (int) (C), (__v2di)(__m128i) _mm_setzero_si128 (), (__mmask8)-1)) + +#define _mm256_mask_extracti64x2_epi64(W, U, X, C) \ + ((__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di)(__m256i) (X),\ + (int) (C), (__v2di)(__m128i) 
(W), (__mmask8) (U))) + +#define _mm256_maskz_extracti64x2_epi64(U, X, C) \ + ((__m128i) __builtin_ia32_extracti64x2_256_mask ((__v4di)(__m256i) (X),\ + (int) (C), (__v2di)(__m128i) _mm_setzero_si128 (), (__mmask8) (U))) + +#define _mm256_reduce_pd(A, B) \ + ((__m256d) __builtin_ia32_reducepd256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)_mm256_setzero_pd(), (__mmask8)-1)) + +#define _mm256_mask_reduce_pd(W, U, A, B) \ + ((__m256d) __builtin_ia32_reducepd256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U))) + +#define _mm256_maskz_reduce_pd(U, A, B) \ + ((__m256d) __builtin_ia32_reducepd256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)_mm256_setzero_pd(), (__mmask8)(U))) + +#define _mm_reduce_pd(A, B) \ + ((__m128d) __builtin_ia32_reducepd128_mask ((__v2df)(__m128d)(A), \ + (int)(B), (__v2df)_mm_setzero_pd(), (__mmask8)-1)) + +#define _mm_mask_reduce_pd(W, U, A, B) \ + ((__m128d) __builtin_ia32_reducepd128_mask ((__v2df)(__m128d)(A), \ + (int)(B), (__v2df)(__m128d)(W), (__mmask8)(U))) + +#define _mm_maskz_reduce_pd(U, A, B) \ + ((__m128d) __builtin_ia32_reducepd128_mask ((__v2df)(__m128d)(A), \ + (int)(B), (__v2df)_mm_setzero_pd(), (__mmask8)(U))) + +#define _mm256_reduce_ps(A, B) \ + ((__m256) __builtin_ia32_reduceps256_mask ((__v8sf)(__m256)(A), \ + (int)(B), (__v8sf)_mm256_setzero_ps(), (__mmask8)-1)) + +#define _mm256_mask_reduce_ps(W, U, A, B) \ + ((__m256) __builtin_ia32_reduceps256_mask ((__v8sf)(__m256)(A), \ + (int)(B), (__v8sf)(__m256)(W), (__mmask8)(U))) + +#define _mm256_maskz_reduce_ps(U, A, B) \ + ((__m256) __builtin_ia32_reduceps256_mask ((__v8sf)(__m256)(A), \ + (int)(B), (__v8sf)_mm256_setzero_ps(), (__mmask8)(U))) + +#define _mm_reduce_ps(A, B) \ + ((__m128) __builtin_ia32_reduceps128_mask ((__v4sf)(__m128)(A), \ + (int)(B), (__v4sf)_mm_setzero_ps(), (__mmask8)-1)) + +#define _mm_mask_reduce_ps(W, U, A, B) \ + ((__m128) __builtin_ia32_reduceps128_mask ((__v4sf)(__m128)(A), \ + (int)(B), 
(__v4sf)(__m128)(W), (__mmask8)(U))) + +#define _mm_maskz_reduce_ps(U, A, B) \ + ((__m128) __builtin_ia32_reduceps128_mask ((__v4sf)(__m128)(A), \ + (int)(B), (__v4sf)_mm_setzero_ps(), (__mmask8)(U))) + +#define _mm256_range_pd(A, B, C) \ + ((__m256d) __builtin_ia32_rangepd256_mask ((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), (__mmask8)-1)) + +#define _mm256_maskz_range_pd(U, A, B, C) \ + ((__m256d) __builtin_ia32_rangepd256_mask ((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), (__mmask8)(U))) + +#define _mm_range_pd(A, B, C) \ + ((__m128d) __builtin_ia32_rangepd128_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), (__mmask8)-1)) + +#define _mm256_range_ps(A, B, C) \ + ((__m256) __builtin_ia32_rangeps256_mask ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), (__mmask8)-1)) + +#define _mm256_mask_range_ps(W, U, A, B, C) \ + ((__m256) __builtin_ia32_rangeps256_mask ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)(__m256)(W), (__mmask8)(U))) + +#define _mm256_maskz_range_ps(U, A, B, C) \ + ((__m256) __builtin_ia32_rangeps256_mask ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), (__mmask8)(U))) + +#define _mm_range_ps(A, B, C) \ + ((__m128) __builtin_ia32_rangeps128_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1)) + +#define _mm_mask_range_ps(W, U, A, B, C) \ + ((__m128) __builtin_ia32_rangeps128_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)(__m128)(W), (__mmask8)(U))) + +#define _mm_maskz_range_ps(U, A, B, C) \ + ((__m128) __builtin_ia32_rangeps128_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)(U))) + +#define _mm256_mask_range_pd(W, U, A, B, C) \ + ((__m256d) __builtin_ia32_rangepd256_mask 
((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)(__m256d)(W), (__mmask8)(U))) + +#define _mm_mask_range_pd(W, U, A, B, C) \ + ((__m128d) __builtin_ia32_rangepd128_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)(__m128d)(W), (__mmask8)(U))) + +#define _mm_maskz_range_pd(U, A, B, C) \ + ((__m128d) __builtin_ia32_rangepd128_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), (__mmask8)(U))) + +#define _mm256_mask_fpclass_pd_mask(u, X, C) \ + ((__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) (__m256d) (X), \ + (int) (C),(__mmask8)(u))) + +#define _mm256_mask_fpclass_ps_mask(u, X, C) \ + ((__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) (__m256) (X), \ + (int) (C),(__mmask8)(u))) + +#define _mm_mask_fpclass_pd_mask(u, X, C) \ + ((__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) (__m128d) (X), \ + (int) (C),(__mmask8)(u))) + +#define _mm_mask_fpclass_ps_mask(u, X, C) \ + ((__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) (__m128) (X), \ + (int) (C),(__mmask8)(u))) + +#define _mm256_fpclass_pd_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclasspd256_mask ((__v4df) (__m256d) (X), \ + (int) (C),(__mmask8)-1)) + +#define _mm256_fpclass_ps_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclassps256_mask ((__v8sf) (__m256) (X), \ + (int) (C),(__mmask8)-1)) + +#define _mm_fpclass_pd_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclasspd128_mask ((__v2df) (__m128d) (X), \ + (int) (C),(__mmask8)-1)) + +#define _mm_fpclass_ps_mask(X, C) \ + ((__mmask8) __builtin_ia32_fpclassps128_mask ((__v4sf) (__m128) (X), \ + (int) (C),(__mmask8)-1)) + +#endif + +#ifdef __DISABLE_AVX512VLDQ__ +#undef __DISABLE_AVX512VLDQ__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VLDQ__ */ + +#endif /* _AVX512VLDQINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vlintrin.h new file mode 100644 index 
0000000..26b286e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vlintrin.h @@ -0,0 +1,13896 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VLINTRIN_H_INCLUDED +#define _AVX512VLINTRIN_H_INCLUDED + +#ifndef __AVX512VL__ +#pragma GCC push_options +#pragma GCC target("avx512vl") +#define __DISABLE_AVX512VL__ +#endif /* __AVX512VL__ */ + +/* Internal data types for implementing the intrinsics. 
*/ +typedef unsigned int __mmask32; +typedef int __v4si_u __attribute__ ((__vector_size__ (16), \ + __may_alias__, __aligned__ (1))); +typedef int __v8si_u __attribute__ ((__vector_size__ (32), \ + __may_alias__, __aligned__ (1))); +typedef long long __v2di_u __attribute__ ((__vector_size__ (16), \ + __may_alias__, __aligned__ (1))); +typedef long long __v4di_u __attribute__ ((__vector_size__ (32), \ + __may_alias__, __aligned__ (1))); + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_movapd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_movapd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_movapd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mov_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_movapd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P, + (__v4df) + 
_mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_storeapd256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storeapd128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_movaps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_movaps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_movaps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mov_ps (__mmask8 __U, __m128 __A) +{ + 
return (__m128) __builtin_ia32_movaps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_storeaps256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storeaps128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_epi64 (void const *__P) +{ + return (__m256i) (*(__v4di *) __P); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_epi64 (void const *__P) +{ + return (__m128i) (*(__v2di *) __P); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +extern __inline __m128i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_movdqa64store256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_movdqa64store128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_epi32 (void const *__P) +{ + return (__m256i) 
(*(__v8si *) __P); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_epi32 (void const *__P) +{ + return (__m128i) (*(__v4si *) __P); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_load_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_epi32 (void *__P, __m256i __A) +{ + *(__v8si *) __P = (__v8si) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_movdqa32store256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_epi32 (void *__P, __m128i __A) +{ + *(__v4si *) __P = (__v4si) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_movdqa32store128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_mask_add_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_add_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sub_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_epi64 (void *__P, __m256i __A) +{ + *(__m256i *) __P = __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_epi64 (void *__P, __m128i __A) +{ + *(__m128i *) __P = __A; +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((const double *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((const double *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((const double *) __P, + (__v2df) __W, + 
(__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((const double *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_storeupd256_mask ((double *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storeupd128_mask ((double *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((const float *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((const float *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((const float *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((const float *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_storeups256_mask ((float *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storeups128_mask ((float *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_epi64 (void const *__P) +{ + return (__m256i) (*(__v4di_u *) __P); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((const long long *) __P, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((const long long *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_epi64 (void const *__P) +{ + return (__m128i) (*(__v2di_u *) __P); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((const long long *) __P, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((const long long *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_storeu_epi64 (void *__P, __m256i __A) +{ + *(__m256i_u *) __P = (__m256i_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_storedqudi256_mask ((long long *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_epi64 (void *__P, __m128i __A) +{ + *(__m128i_u *) __P = (__m128i_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedqudi128_mask ((long long *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_epi32 (void const *__P) +{ + return (__m256i) (*(__v8si_u *) __P); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((const int *) __P, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((const int *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_epi32 (void const *__P) +{ + return (__m128i) (*(__v4si_u *) __P); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((const int *) __P, + (__v4si) __W, + (__mmask8) __U); +} + +extern 
__inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((const int *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_epi32 (void *__P, __m256i __A) +{ + *(__m256i_u *) __P = (__m256i_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_storedqusi256_mask ((int *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_epi32 (void *__P, __m128i __A) +{ + *(__m128i_u *) __P = (__m128i_u) __A; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedqusi128_mask ((int *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_abs_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_epu32 (__m256d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + 
_mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_epu32 (__m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttps_epu32 (__m256 __A) +{ + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_epu32 (__m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) +{ + return (__m128i) 
__builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttpd_epu32 (__m256d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) 
__U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_epu32 (__m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) +{ + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) +{ + return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) +{ + return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu32_pd (__m128i __A) +{ + return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) +{ + return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) +{ + return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu32_pd (__m128i __A) +{ + return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A, + (__v2df) + 
_mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) +{ + return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) +{ + return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) +{ + return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepu32_ps (__m256i __A) +{ + return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) +{ + return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) +{ + return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu32_ps (__m128i __A) +{ + return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) +{ + return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) +{ + return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) +{ + return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A, + (__v2df) 
__W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) +{ + return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdb128mem_mask ((unsigned int *) __P, (__v4si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + 
__builtin_ia32_pmovdb256mem_mask ((unsigned long long *) __P, (__v8si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdb128mem_mask ((unsigned int *) __P, (__v4si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdb256mem_mask ((unsigned long long *) __P, (__v8si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtusepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdb128mem_mask ((unsigned int *) __P, (__v4si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtusepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdb256mem_mask ((unsigned long long *) __P, (__v8si) __A, __M); +} + 
+extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdw128mem_mask ((unsigned long long *) __P, (__v4si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + 
__builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdw128mem_mask ((unsigned long long *) __P, (__v4si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)__O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtusepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdw128mem_mask ((unsigned long long *) __P, (__v4si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtusepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern 
__inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqb128mem_mask ((unsigned short *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + 
(__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqb256mem_mask ((unsigned int *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqb128mem_mask ((unsigned short *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsepi64_epi8 (__m256i __A) +{ + 
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqb256mem_mask ((unsigned int *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtusepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqb128mem_mask ((unsigned short *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtusepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqb256mem_mask ((unsigned int *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqw128mem_mask ((unsigned int *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi)__O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi) + 
_mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqw256mem_mask ((unsigned long long *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqw128mem_mask ((unsigned int *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return 
(__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqw256mem_mask ((unsigned long long *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtusepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqw128mem_mask ((unsigned int *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtusepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqw256mem_mask ((unsigned long long *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqd128mem_mask ((unsigned long long *) __P, + (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) __O, 
__M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A, + (__v4si) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqd128mem_mask ((unsigned long long *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) 
__builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)__O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtusepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqd128mem_mask ((unsigned long long *) __P, (__v2di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtusepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) + _mm_undefined_si128 (), + (__mmask8) -1); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) __O, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastss256_mask ((__v4sf) __A, + (__v8sf) __O, + __M); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastss256_mask ((__v4sf) __A, + (__v8sf) + _mm256_setzero_ps (), + __M); +} 
+ +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A) +{ + return (__m128) __builtin_ia32_broadcastss128_mask ((__v4sf) __A, + (__v4sf) __O, + __M); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m128) __builtin_ia32_broadcastss128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + __M); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastsd256_mask ((__v2df) __A, + (__v4df) __O, + __M); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m256d) __builtin_ia32_broadcastsd256_mask ((__v2df) __A, + (__v4df) + _mm256_setzero_pd (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastd256_mask ((__v4si) __A, + (__v8si) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastd256_mask ((__v4si) __A, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_set1_epi32 (__m256i __O, __mmask8 __M, int __A) +{ + return (__m256i) __builtin_ia32_pbroadcastd256_gpr_mask (__A, (__v8si) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_set1_epi32 (__mmask8 __M, int __A) +{ + 
return (__m256i) __builtin_ia32_pbroadcastd256_gpr_mask (__A, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastd128_mask ((__v4si) __A, + (__v4si) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastd128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_set1_epi32 (__m128i __O, __mmask8 __M, int __A) +{ + return (__m128i) __builtin_ia32_pbroadcastd128_gpr_mask (__A, (__v4si) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_set1_epi32 (__mmask8 __M, int __A) +{ + return (__m128i) + __builtin_ia32_pbroadcastd128_gpr_mask (__A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastq256_mask ((__v2di) __A, + (__v4di) __O, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_pbroadcastq256_mask ((__v2di) __A, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A) +{ + return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, (__v4di) __O, + __M); +} + +extern __inline __m256i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m256i) __builtin_ia32_pbroadcastq256_gpr_mask (__A, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastq128_mask ((__v2di) __A, + (__v2di) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pbroadcastq128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A) +{ + return (__m128i) __builtin_ia32_pbroadcastq128_gpr_mask (__A, (__v2di) __O, + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m128i) + __builtin_ia32_pbroadcastq128_gpr_mask (__A, + (__v2di) _mm_setzero_si128 (), + __M); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_f32x4 (__m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A, + (__v8sf)_mm256_undefined_pd (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcast_f32x4 (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256) __builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A, + (__v8sf) __O, + __M); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A) +{ + return (__m256) 
__builtin_ia32_broadcastf32x4_256_mask ((__v4sf) __A, + (__v8sf) + _mm256_setzero_ps (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_i32x4 (__m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) + __A, + (__v8si)_mm256_undefined_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_broadcast_i32x4 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) + __A, + (__v8si) + __O, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_broadcast_i32x4 (__mmask8 __M, __m128i __A) +{ + return (__m256i) __builtin_ia32_broadcasti32x4_256_mask ((__v4si) + __A, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi8_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A, + (__v4si) + _mm_setzero_si128 (), 
+ (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi8_epi64 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi8_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi16_epi32 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi16_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi16_epi64 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepi16_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepi32_epi64 (__m256i __W, __mmask8 __U, __m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X) +{ + return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_cvtepi32_epi64 (__m128i __W, __mmask8 __U, __m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu8_epi32 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu8_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu8_epi64 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m256i) 
__builtin_ia32_pmovzxbq256_mask ((__v16qi) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu8_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu16_epi32 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu16_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu16_epi64 (__m256i __W, __mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A, + (__v4di) 
__W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu16_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtepu32_epi64 (__m256i __W, __mmask8 __U, __m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X) +{ + return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtepu32_epi64 (__m128i __W, __mmask8 __U, __m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rcp14_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp14_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rcp14_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + 
(__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp14_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rsqrt14_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt14_pd (__m128d __A) +{ + 
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rsqrt14_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt14_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) 
__builtin_ia32_sqrtps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ 
+ return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_getexp_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_getexp_pd 
(__m256d __A) +{ + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getexp_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srl_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A, + (__v4si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srl_epi32 (__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A, + (__v4si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srl_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srl_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srl_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srl_epi64 (__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) + _mm256_setzero_si256 
(), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srl_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srl_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_scalef_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) 
__builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_scalef_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) +{ + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_pd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_scalef_ps (__m128 __A, __m128 __B) +{ + 
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmadd_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmadd_pd (__m256d __A, __m256d __B, __m256d __C, + __mmask8 __U) +{ + return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmadd_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_pd (__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask3_fmadd_pd (__m128d __A, __m128d __B, __m128d __C, + __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmadd_ps (__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmadd_ps (__m256 __A, __m256 __B, __m256 __C, + __mmask8 __U) +{ + return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmadd_ps (__mmask8 __U, __m256 __A, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmadd_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmadd_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmadd_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmsub_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmsubpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmsub_pd (__m256d __A, __m256d __B, __m256d __C, + __mmask8 __U) +{ + return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmsub_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmsubpd256_maskz ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_pd (__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmsubpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_pd (__m128d __A, __m128d __B, __m128d __C, + __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmsubpd128_maskz ((__v2df) __A, + (__v2df) __B, + (__v2df) 
__C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmsub_ps (__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmsubps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmsub_ps (__m256 __A, __m256 __B, __m256 __C, + __mmask8 __U) +{ + return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmsub_ps (__mmask8 __U, __m256 __A, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfmsubps256_maskz ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsub_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmsubps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsub_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsub_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmsubps128_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmaddsub_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256d __C) +{ + return (__m256d) 
__builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmaddsub_pd (__m256d __A, __m256d __B, __m256d __C, + __mmask8 __U) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmaddsub_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmaddsub_pd (__m128d __A, __mmask8 __U, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmaddsub_pd (__m128d __A, __m128d __B, __m128d __C, + __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmaddsub_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) + __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmaddsub_ps (__m256 __A, __mmask8 __U, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_mask3_fmaddsub_ps (__m256 __A, __m256 __B, __m256 __C, + __mmask8 __U) +{ + return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmaddsub_ps (__mmask8 __U, __m256 __A, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmaddsub_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmaddsub_ps (__m128 __A, __m128 __B, __m128 __C, + __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmaddsub_ps (__mmask8 __U, __m128 __A, __m128 __B, + __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmsubadd_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmsubadd_pd (__m256d __A, __m256d __B, __m256d __C, + __mmask8 __U) +{ + return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A, + (__v4df) __B, 
+ (__v4df) __C, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmsubadd_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsubadd_pd (__m128d __A, __mmask8 __U, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsubadd_pd (__m128d __A, __m128d __B, __m128d __C, + __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsubadd_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C, + (__mmask8) + __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fmsubadd_ps (__m256 __A, __mmask8 __U, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fmsubadd_ps (__m256 __A, __m256 __B, __m256 __C, + __mmask8 __U) +{ + return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fmsubadd_ps (__mmask8 __U, __m256 
__A, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fmsubadd_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fmsubadd_ps (__m128 __A, __m128 __B, __m128 __C, + __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fmsubadd_ps (__mmask8 __U, __m128 __A, __m128 __B, + __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fnmadd_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fnmadd_pd (__m256d __A, __m256d __B, __m256d __C, + __mmask8 __U) +{ + return (__m256d) __builtin_ia32_vfnmaddpd256_mask3 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fnmadd_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfnmaddpd256_maskz ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_pd (__m128d __A, __mmask8 __U, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_pd (__m128d __A, __m128d __B, __m128d __C, + __mmask8 __U) +{ + return (__m128d) __builtin_ia32_vfnmaddpd128_mask3 ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfnmaddpd128_maskz ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fnmadd_ps (__m256 __A, __mmask8 __U, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fnmadd_ps (__m256 __A, __m256 __B, __m256 __C, + __mmask8 __U) +{ + return (__m256) __builtin_ia32_vfnmaddps256_mask3 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fnmadd_ps (__mmask8 __U, __m256 __A, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfnmaddps256_maskz ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmadd_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) 
__C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmadd_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfnmaddps128_mask3 ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmadd_ps (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfnmaddps128_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fnmsub_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fnmsub_pd (__m256d __A, __m256d __B, __m256d __C, + __mmask8 __U) +{ + return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fnmsub_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256d __C) +{ + return (__m256d) __builtin_ia32_vfnmsubpd256_maskz ((__v4df) __A, + (__v4df) __B, + (__v4df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_pd (__m128d __A, __mmask8 __U, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_pd (__m128d __A, __m128d __B, __m128d __C, + __mmask8 __U) +{ + return 
(__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128d __C) +{ + return (__m128d) __builtin_ia32_vfnmsubpd128_maskz ((__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fnmsub_ps (__m256 __A, __mmask8 __U, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask3_fnmsub_ps (__m256 __A, __m256 __B, __m256 __C, + __mmask8 __U) +{ + return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fnmsub_ps (__mmask8 __U, __m256 __A, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_vfnmsubps256_maskz ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fnmsub_ps (__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask3_fnmsub_ps (__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fnmsub_ps 
(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfnmsubps128_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8su)__A | (__v8su)__B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4su)__A | (__v4su)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return 
(__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v8su)__A ^ (__v8su)__B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4su)__A ^ (__v4su)__B); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) +{ + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) +{ + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) +{ + return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) +{ + return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_epu32 (__m256 __A) +{ + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) +{ + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) 
__A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_epu32 (__m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) +{ + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_movddup256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_movddup256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_movddup128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_movddup128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_movshdup256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_movshdup256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_movshdup128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_movshdup128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_movsldup256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_movsldup256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_movsldup128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) 
__builtin_ia32_movsldup128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpackhi_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhdq128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpackhi_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhdq128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpackhi_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhdq256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpackhi_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhdq256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpackhi_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhqdq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpackhi_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckhqdq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_mask_unpackhi_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhqdq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpackhi_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckhqdq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpacklo_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckldq128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpacklo_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpckldq128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpacklo_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckldq256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpacklo_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpckldq256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpacklo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) 
__builtin_ia32_punpcklqdq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpacklo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_punpcklqdq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpacklo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_punpcklqdq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpacklo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_punpcklqdq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epu32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __A, + (__v4si) __B, 0, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqd128_mask ((__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epu32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __A, + (__v4si) __B, 0, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) 
__builtin_ia32_pcmpeqd128_mask ((__v4si) __A, + (__v4si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epu32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __A, + (__v8si) __B, 0, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqd256_mask ((__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epu32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __A, + (__v8si) __B, 0, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqd256_mask ((__v8si) __A, + (__v8si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epu64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __A, + (__v2di) __B, 0, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqq128_mask ((__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epu64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __A, + (__v2di) __B, 0, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpeq_epi64_mask 
(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqq128_mask ((__v2di) __A, + (__v2di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epu64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __A, + (__v4di) __B, 0, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpeq_epi64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqq256_mask ((__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epu64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __A, + (__v4di) __B, 0, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpeq_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpeqq256_mask ((__v4di) __A, + (__v4di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epu32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __A, + (__v4si) __B, 6, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtd128_mask ((__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpgt_epu32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __A, + (__v4si) __B, 6, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_cmpgt_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtd128_mask ((__v4si) __A, + (__v4si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epu32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __A, + (__v8si) __B, 6, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtd256_mask ((__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epu32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __A, + (__v8si) __B, 6, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtd256_mask ((__v8si) __A, + (__v8si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epu64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __A, + (__v2di) __B, 6, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtq128_mask ((__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpgt_epu64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __A, + (__v2di) __B, 6, __U); +} 
+ +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpgt_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtq128_mask ((__v2di) __A, + (__v2di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epu64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __A, + (__v4di) __B, 6, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpgt_epi64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtq256_mask ((__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epu64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __A, + (__v4di) __B, 6, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpgt_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_pcmpgtq256_mask ((__v4di) __A, + (__v4di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_test_epi32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd128 ((__v4si) __A, + (__v4si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_test_epi32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd256 
((__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmd256 ((__v8si) __A, + (__v8si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_test_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq128 ((__v2di) __A, + (__v2di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_test_epi64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestmq256 ((__v4di) __A, + (__v4di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testn_epi32_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd128 ((__v4si) __A, + (__v4si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testn_epi32_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) 
__builtin_ia32_ptestnmd256 ((__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmd256 ((__v8si) __A, + (__v8si) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testn_epi64_mask (__m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq128 ((__v2di) __A, + (__v2di) __B, __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testn_epi64_mask (__m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__mmask8) __builtin_ia32_ptestnmq256 ((__v4di) __A, + (__v4di) __B, __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_compressstoredf256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_compressstoredf128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + 
(__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_compressstoredi256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_compressstoredi128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_compressstoresi256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_compressstoresi128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) 
+{ + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_expanddf256_maskz ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_expandloaddf256_maskz ((__v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_expanddf128_maskz ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_expandloaddf128_maskz ((__v2df *) 
__P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) + __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_expandsf256_maskz ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_expandloadsf256_maskz ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) + __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_expandsf128_maskz ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_expandloadsf128_maskz ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_expanddi256_maskz ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U, + void const *__P) +{ + return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloaddi256_maskz ((__v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_expanddi128_maskz ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloaddi128_maskz ((__v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_expandsi256_maskz ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U, + void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadsi256_maskz ((__v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_expandsi128_maskz ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadsi128_maskz ((__v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) +{ + return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I + /* idx */ , + (__v4df) __A, + (__v4df) __B, + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I, + __m256d __B) +{ + return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I + /* idx */ , + (__v4df) __A, + (__v4df) __B, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U, + __m256d __B) +{ + return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A, + (__v4di) __I + /* idx */ , + (__v4df) __B, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I, + __m256d __B) +{ + return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I + /* idx */ , + (__v4df) 
__A, + (__v4df) __B, + (__mmask8) + __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) +{ + return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I + /* idx */ , + (__v8sf) __A, + (__v8sf) __B, + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I, + __m256 __B) +{ + return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I + /* idx */ , + (__v8sf) __A, + (__v8sf) __B, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U, + __m256 __B) +{ + return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A, + (__v8si) __I + /* idx */ , + (__v8sf) __B, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I, + __m256 __B) +{ + return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I + /* idx */ , + (__v8sf) __A, + (__v8sf) __B, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I + /* idx */ , + (__v2di) __A, + (__v2di) __B, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I + /* idx */ , + (__v2di) __A, + (__v2di) __B, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A, + (__v2di) __I + /* idx */ , + (__v2di) __B, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I + /* idx */ , + (__v2di) __A, + (__v2di) __B, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I + /* idx */ , + (__v4si) __A, + (__v4si) __B, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I + /* idx */ , + (__v4si) __A, + (__v4si) __B, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A, + (__v4si) __I + /* idx */ , + (__v4si) __B, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I + /* idx */ , + (__v4si) __A, + (__v4si) __B, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) +{ + 
return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I + /* idx */ , + (__v4di) __A, + (__v4di) __B, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I + /* idx */ , + (__v4di) __A, + (__v4di) __B, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I, + __mmask8 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A, + (__v4di) __I + /* idx */ , + (__v4di) __B, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I + /* idx */ , + (__v4di) __A, + (__v4di) __B, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I + /* idx */ , + (__v8si) __A, + (__v8si) __B, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I + /* idx */ , + (__v8si) __A, + (__v8si) __B, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I, + __mmask8 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A, + (__v8si) 
__I + /* idx */ , + (__v8si) __B, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A, + __m256i __I, __m256i __B) +{ + return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I + /* idx */ , + (__v8si) __A, + (__v8si) __B, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) +{ + return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I + /* idx */ , + (__v2df) __A, + (__v2df) __B, + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I, + __m128d __B) +{ + return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I + /* idx */ , + (__v2df) __A, + (__v2df) __B, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U, + __m128d __B) +{ + return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A, + (__v2di) __I + /* idx */ , + (__v2df) __B, + (__mmask8) + __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I, + __m128d __B) +{ + return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I + /* idx */ , + (__v2df) __A, + (__v2df) __B, + (__mmask8) + __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) +{ + return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I + /* idx */ , + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I, + __m128 __B) +{ + return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I + /* idx */ , + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U, + __m128 __B) +{ + return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A, + (__v4si) __I + /* idx */ , + (__v4sf) __B, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I, + __m128 __B) +{ + return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I + /* idx */ , + (__v4sf) __A, + (__v4sf) __B, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srav_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srav_epi64 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srav_epi64 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sllv_epi32 (__m256i __W, __mmask8 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) 
__X, + (__v8si) __Y, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sllv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X, + (__v8si) __Y, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sllv_epi32 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X, + (__v4si) __Y, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sllv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X, + (__v4si) __Y, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sllv_epi64 (__m256i __W, __mmask8 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sllv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sllv_epi64 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sllv_epi64 (__mmask8 __U, __m128i __X, 
__m128i __Y) +{ + return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srav_epi32 (__m256i __W, __mmask8 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X, + (__v8si) __Y, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srav_epi32 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X, + (__v8si) __Y, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srav_epi32 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X, + (__v4si) __Y, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srav_epi32 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X, + (__v4si) __Y, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srlv_epi32 (__m256i __W, __mmask8 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X, + (__v8si) __Y, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srlv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X, + (__v8si) __Y, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srlv_epi32 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X, + (__v4si) __Y, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srlv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X, + (__v4si) __Y, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srlv_epi64 (__m256i __W, __mmask8 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srlv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srlv_epi64 (__m128i __W, __mmask8 __U, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srlv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X, + (__v2di) __Y, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rolv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 
(), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rolv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rorv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) 
__builtin_ia32_prorvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rorv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rolv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i 
__A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rolv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rorv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_rorv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srav_epi64 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srav_epi64 (__m256i __W, __mmask8 __U, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X, + (__v4di) __Y, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __U); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_pd (), + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_pd (), + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_pd (), + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_pd (), + __U); +} + +extern __inline __m256i 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du)__A | (__v4du)__B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A | (__v2du)__B); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return 
(__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) ((__v4du)__A ^ (__v4du)__B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A ^ (__v2du)__B); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline 
__m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + 
+extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) 
__W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) 
__U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) 
__W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A, + (__v4di) 
__B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_epu64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_epu64 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A, + 
(__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) 
__builtin_ia32_pminud256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A, + (__v2di) 
__B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A, + (__v4si) 
__B, + (__v4si) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, __M); +} + +#ifndef __AVX512CD__ +#pragma GCC push_options +#pragma GCC target("avx512vl,avx512cd") +#define __DISABLE_AVX512VLCD__ +#endif + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastmb_epi64 (__mmask8 __A) +{ 
+ return (__m128i) __builtin_ia32_broadcastmb128 (__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_broadcastmb256 (__A); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m128i) __builtin_ia32_broadcastmw128 (__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m256i) __builtin_ia32_broadcastmw256 (__A); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_lzcnt_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_lzcnt_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + 
+extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_conflict_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_conflict_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ 
((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lzcnt_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lzcnt_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_conflict_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) 
__builtin_ia32_vpconflictdi_128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_conflict_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +#ifdef __DISABLE_AVX512VLCD__ +#pragma GCC pop_options +#endif + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpacklo_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_unpcklpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpacklo_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_unpcklpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpacklo_pd (__m128d __W, 
__mmask8 __U, __m128d __A, + __m128d __B) +{ + return (__m128d) __builtin_ia32_unpcklpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpacklo_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_unpcklpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpacklo_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) +{ + return (__m256) __builtin_ia32_unpcklps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpackhi_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) +{ + return (__m256d) __builtin_ia32_unpckhpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpackhi_pd (__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_unpckhpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpackhi_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) +{ + return (__m128d) __builtin_ia32_unpckhpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpackhi_pd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_unpckhpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_unpackhi_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) +{ + return (__m256) __builtin_ia32_unpckhps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpackhi_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_unpckhps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpackhi_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpckhps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpackhi_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpckhps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_unpacklo_ps (__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_unpcklps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline 
__m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_unpacklo_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpcklps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_unpacklo_ps (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpcklps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sra_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A, + (__v4si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sra_epi32 (__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A, + (__v4si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sra_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + 
+extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sra_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sra_epi64 (__m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sra_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sra_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sra_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + 
_mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sll_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sll_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_sll_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_sll_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sll_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B) +{ + return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A, + (__v4si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sll_epi32 (__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A, + (__v4si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_sll_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B) +{ + 
return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_sll_epi64 (__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A, + (__v2di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutexvar_ps (__m256 __W, __mmask8 __U, __m256i __X, + __m256 __Y) +{ + return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y, + (__v8si) __X, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutexvar_ps (__mmask8 __U, __m256i __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y, + (__v8si) __X, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutexvar_pd (__m256i __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y, + (__v4di) __X, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X, + __m256d __Y) +{ + return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y, + (__v4di) __X, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y, + (__v4di) __X, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_mask_permutevar_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256i __C) +{ + return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A, + (__v4di) __C, + (__v4df) __W, + (__mmask8) + __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutevar_pd (__mmask8 __U, __m256d __A, __m256i __C) +{ + return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A, + (__v4di) __C, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) + __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutevar_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256i __C) +{ + return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A, + (__v8si) __C, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutevar_ps (__mmask8 __U, __m256 __A, __m256i __C) +{ + return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A, + (__v8si) __C, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutevar_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128i __C) +{ + return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A, + (__v2di) __C, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutevar_pd (__mmask8 __U, __m128d __A, __m128i __C) +{ + return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A, + (__v2di) __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permutevar_ps (__m128 __W, __mmask8 __U, __m128 __A, + __m128i __C) +{ + return (__m128) __builtin_ia32_vpermilvarps_mask 
((__v4sf) __A, + (__v4si) __C, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permutevar_ps (__mmask8 __U, __m128 __A, __m128i __C) +{ + return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A, + (__v4si) __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y, + (__v4di) __X, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) 
+{ + return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutexvar_epi64 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y, + (__v4di) __X, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y, + (__v4di) __X, + (__v4di) __W, + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) __W, __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutexvar_epi32 (__mmask8 __M, __m256i __X, 
__m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y, + (__v8si) __X, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) __W, __M); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutexvar_epi32 (__m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y, + (__v8si) __X, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutexvar_epi32 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y, + (__v8si) __X, + (__v8si) __W, + __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_cmpneq_epu32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epu32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epu32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epu32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epu32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, 
+ (__v4di) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epu64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epu64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epu64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epu64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epu64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mask_cmpneq_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epi32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epi32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epi32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epi32_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmple_epi32_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + 
(__v8si) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpneq_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpneq_epi64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmplt_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmplt_epi64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmpge_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmpge_epi64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmple_epi64_mask (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_cmple_epi64_mask (__m256i __X, __m256i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epu32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epu32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epu32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epu32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + 
(__v4si) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epu32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epu64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epu64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epu64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_mask_cmple_epu64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epu64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epi32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epi32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 5, + 
(__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epi32_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epi32_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, 2, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpneq_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 4, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_epi64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 4, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmplt_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 1, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 1, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmpge_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 5, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_epi64_mask 
(__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 5, + (__mmask8) -1); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmple_epi64_mask (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 2, + (__mmask8) __M); +} + +extern __inline __mmask8 + __attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_epi64_mask (__m128i __X, __m128i __Y) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, 2, + (__mmask8) -1); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex_epi64 (__m256i __X, const int __I) +{ + return (__m256i) __builtin_ia32_permdi256_mask ((__v4di) __X, + __I, + (__v4di) + _mm256_setzero_si256(), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex_epi64 (__m256i __W, __mmask8 __M, + __m256i __X, const int __I) +{ + return (__m256i) __builtin_ia32_permdi256_mask ((__v4di) __X, + __I, + (__v4di) __W, + (__mmask8) __M); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex_epi64 (__mmask8 __M, __m256i __X, const int __I) +{ + return (__m256i) __builtin_ia32_permdi256_mask ((__v4di) __X, + __I, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __M); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B, const int __imm) +{ + return (__m256d) __builtin_ia32_shufpd256_mask ((__v4df) __A, + (__v4df) __B, __imm, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_pd 
(__mmask8 __U, __m256d __A, __m256d __B, + const int __imm) +{ + return (__m256d) __builtin_ia32_shufpd256_mask ((__v4df) __A, + (__v4df) __B, __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shuffle_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B, const int __imm) +{ + return (__m128d) __builtin_ia32_shufpd128_mask ((__v2df) __A, + (__v2df) __B, __imm, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shuffle_pd (__mmask8 __U, __m128d __A, __m128d __B, + const int __imm) +{ + return (__m128d) __builtin_ia32_shufpd128_mask ((__v2df) __A, + (__v2df) __B, __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B, const int __imm) +{ + return (__m256) __builtin_ia32_shufps256_mask ((__v8sf) __A, + (__v8sf) __B, __imm, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_ps (__mmask8 __U, __m256 __A, __m256 __B, + const int __imm) +{ + return (__m256) __builtin_ia32_shufps256_mask ((__v8sf) __A, + (__v8sf) __B, __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shuffle_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B, + const int __imm) +{ + return (__m128) __builtin_ia32_shufps128_mask ((__v4sf) __A, + (__v4sf) __B, __imm, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shuffle_ps (__mmask8 __U, __m128 __A, __m128 __B, + const int __imm) +{ + return (__m128) 
__builtin_ia32_shufps128_mask ((__v4sf) __A, + (__v4sf) __B, __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_inserti32x4 (__m256i __A, __m128i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si) __A, + (__v4si) __B, + __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_inserti32x4 (__m256i __W, __mmask8 __U, __m256i __A, + __m128i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si) __A, + (__v4si) __B, + __imm, + (__v8si) __W, + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_inserti32x4 (__mmask8 __U, __m256i __A, __m128i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si) __A, + (__v4si) __B, + __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf32x4 (__m256 __A, __m128 __B, const int __imm) +{ + return (__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf) __A, + (__v4sf) __B, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_insertf32x4 (__m256 __W, __mmask8 __U, __m256 __A, + __m128 __B, const int __imm) +{ + return (__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf) __A, + (__v4sf) __B, + __imm, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_insertf32x4 (__mmask8 __U, __m256 __A, __m128 __B, + const int __imm) +{ + return (__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf) __A, 
+ (__v4sf) __B, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extracti32x4_epi32 (__m256i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si) __A, + __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_extracti32x4_epi32 (__m128i __W, __mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si) __A, + __imm, + (__v4si) __W, + (__mmask8) + __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_extracti32x4_epi32 (__mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si) __A, + __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf32x4_ps (__m256 __A, const int __imm) +{ + return (__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf) __A, + __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_extractf32x4_ps (__m128 __W, __mmask8 __U, __m256 __A, + const int __imm) +{ + return (__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf) __A, + __imm, + (__v4sf) __W, + (__mmask8) + __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_extractf32x4_ps (__mmask8 __U, __m256 __A, + const int __imm) +{ + return (__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf) __A, + __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) + __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_shuffle_i64x2 (__m256i __A, __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di) __A, + (__v4di) __B, + __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_i64x2 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di) __A, + (__v4di) __B, + __imm, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_i64x2 (__mmask8 __U, __m256i __A, __m256i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di) __A, + (__v4di) __B, + __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_i32x4 (__m256i __A, __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si) __A, + (__v8si) __B, + __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_i32x4 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si) __A, + (__v8si) __B, + __imm, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_i32x4 (__mmask8 __U, __m256i __A, __m256i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si) __A, + (__v8si) __B, + __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_shuffle_f64x2 (__m256d __A, __m256d __B, const int __imm) +{ + return (__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df) __A, + (__v4df) __B, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_f64x2 (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B, const int __imm) +{ + return (__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df) __A, + (__v4df) __B, + __imm, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_f64x2 (__mmask8 __U, __m256d __A, __m256d __B, + const int __imm) +{ + return (__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df) __A, + (__v4df) __B, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_f32x4 (__m256 __A, __m256 __B, const int __imm) +{ + return (__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf) __A, + (__v8sf) __B, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_f32x4 (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B, const int __imm) +{ + return (__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf) __A, + (__v8sf) __B, + __imm, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_f32x4 (__mmask8 __U, __m256 __A, __m256 __B, + const int __imm) +{ + return (__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf) __A, + (__v8sf) __B, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fixupimm_pd (__m256d __A, __m256d 
__B, __m256i __C, + const int __imm) +{ + return (__m256d) __builtin_ia32_fixupimmpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4di) __C, + __imm, + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fixupimm_pd (__m256d __A, __mmask8 __U, __m256d __B, + __m256i __C, const int __imm) +{ + return (__m256d) __builtin_ia32_fixupimmpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4di) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fixupimm_pd (__mmask8 __U, __m256d __A, __m256d __B, + __m256i __C, const int __imm) +{ + return (__m256d) __builtin_ia32_fixupimmpd256_maskz ((__v4df) __A, + (__v4df) __B, + (__v4di) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fixupimm_ps (__m256 __A, __m256 __B, __m256i __C, + const int __imm) +{ + return (__m256) __builtin_ia32_fixupimmps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8si) __C, + __imm, + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_fixupimm_ps (__m256 __A, __mmask8 __U, __m256 __B, + __m256i __C, const int __imm) +{ + return (__m256) __builtin_ia32_fixupimmps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8si) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_fixupimm_ps (__mmask8 __U, __m256 __A, __m256 __B, + __m256i __C, const int __imm) +{ + return (__m256) __builtin_ia32_fixupimmps256_maskz ((__v8sf) __A, + (__v8sf) __B, + (__v8si) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fixupimm_pd (__m128d __A, __m128d __B, __m128i __C, + const int __imm) +{ + return (__m128d) 
__builtin_ia32_fixupimmpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, + __imm, + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fixupimm_pd (__m128d __A, __mmask8 __U, __m128d __B, + __m128i __C, const int __imm) +{ + return (__m128d) __builtin_ia32_fixupimmpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fixupimm_pd (__mmask8 __U, __m128d __A, __m128d __B, + __m128i __C, const int __imm) +{ + return (__m128d) __builtin_ia32_fixupimmpd128_maskz ((__v2df) __A, + (__v2df) __B, + (__v2di) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fixupimm_ps (__m128 __A, __m128 __B, __m128i __C, const int __imm) +{ + return (__m128) __builtin_ia32_fixupimmps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, + __imm, + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_fixupimm_ps (__m128 __A, __mmask8 __U, __m128 __B, + __m128i __C, const int __imm) +{ + return (__m128) __builtin_ia32_fixupimmps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_fixupimm_ps (__mmask8 __U, __m128 __A, __m128 __B, + __m128i __C, const int __imm) +{ + return (__m128) __builtin_ia32_fixupimmps128_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4si) __C, + __imm, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srli_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_psrldi256_mask ((__v8si) __A, __imm, + (__v8si) __W, + (__mmask8) __U); +} + 
+extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srli_epi32 (__mmask8 __U, __m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psrldi256_mask ((__v8si) __A, __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srli_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_psrldi128_mask ((__v4si) __A, __imm, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srli_epi32 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_psrldi128_mask ((__v4si) __A, __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srli_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_psrlqi256_mask ((__v4di) __A, __imm, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srli_epi64 (__mmask8 __U, __m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psrlqi256_mask ((__v4di) __A, __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srli_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_psrlqi128_mask ((__v2di) __A, __imm, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srli_epi64 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_psrlqi128_mask ((__v2di) __A, __imm, 
+ (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_ternarylogic_epi64 (__m256i __A, __m256i __B, __m256i __C, + const int __imm) +{ + return (__m256i) + __builtin_ia32_pternlogq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __C, + (unsigned char) __imm, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_ternarylogic_epi64 (__m256i __A, __mmask8 __U, + __m256i __B, __m256i __C, + const int __imm) +{ + return (__m256i) + __builtin_ia32_pternlogq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_ternarylogic_epi64 (__mmask8 __U, __m256i __A, + __m256i __B, __m256i __C, + const int __imm) +{ + return (__m256i) + __builtin_ia32_pternlogq256_maskz ((__v4di) __A, + (__v4di) __B, + (__v4di) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_ternarylogic_epi32 (__m256i __A, __m256i __B, __m256i __C, + const int __imm) +{ + return (__m256i) + __builtin_ia32_pternlogd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __C, + (unsigned char) __imm, + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_ternarylogic_epi32 (__m256i __A, __mmask8 __U, + __m256i __B, __m256i __C, + const int __imm) +{ + return (__m256i) + __builtin_ia32_pternlogd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_ternarylogic_epi32 (__mmask8 __U, __m256i __A, + __m256i __B, __m256i __C, + const int __imm) +{ + 
return (__m256i) + __builtin_ia32_pternlogd256_maskz ((__v8si) __A, + (__v8si) __B, + (__v8si) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ternarylogic_epi64 (__m128i __A, __m128i __B, __m128i __C, + const int __imm) +{ + return (__m128i) + __builtin_ia32_pternlogq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __C, + (unsigned char) __imm, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_ternarylogic_epi64 (__m128i __A, __mmask8 __U, + __m128i __B, __m128i __C, + const int __imm) +{ + return (__m128i) + __builtin_ia32_pternlogq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_ternarylogic_epi64 (__mmask8 __U, __m128i __A, + __m128i __B, __m128i __C, + const int __imm) +{ + return (__m128i) + __builtin_ia32_pternlogq128_maskz ((__v2di) __A, + (__v2di) __B, + (__v2di) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ternarylogic_epi32 (__m128i __A, __m128i __B, __m128i __C, + const int __imm) +{ + return (__m128i) + __builtin_ia32_pternlogd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __C, + (unsigned char) __imm, + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_ternarylogic_epi32 (__m128i __A, __mmask8 __U, + __m128i __B, __m128i __C, + const int __imm) +{ + return (__m128i) + __builtin_ia32_pternlogd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_ternarylogic_epi32 (__mmask8 
__U, __m128i __A, + __m128i __B, __m128i __C, + const int __imm) +{ + return (__m128i) + __builtin_ia32_pternlogd128_maskz ((__v4si) __A, + (__v4si) __B, + (__v4si) __C, + (unsigned char) __imm, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_roundscale_ps (__m256 __A, const int __imm) +{ + return (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_roundscale_ps (__m256 __W, __mmask8 __U, __m256 __A, + const int __imm) +{ + return (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, + __imm, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_roundscale_ps (__mmask8 __U, __m256 __A, const int __imm) +{ + return (__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf) __A, + __imm, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_roundscale_pd (__m256d __A, const int __imm) +{ + return (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_roundscale_pd (__m256d __W, __mmask8 __U, __m256d __A, + const int __imm) +{ + return (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, + __imm, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_roundscale_pd (__mmask8 __U, __m256d __A, const int __imm) +{ + return (__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df) __A, + __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + 
+extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_ps (__m128 __A, const int __imm) +{ + return (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, + __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_ps (__m128 __W, __mmask8 __U, __m128 __A, + const int __imm) +{ + return (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, + __imm, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_ps (__mmask8 __U, __m128 __A, const int __imm) +{ + return (__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf) __A, + __imm, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roundscale_pd (__m128d __A, const int __imm) +{ + return (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, + __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_roundscale_pd (__m128d __W, __mmask8 __U, __m128d __A, + const int __imm) +{ + return (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, + __imm, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_roundscale_pd (__mmask8 __U, __m128d __A, const int __imm) +{ + return (__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df) __A, + __imm, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_getmant_ps (__m256 __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256) __builtin_ia32_getmantps256_mask ((__v8sf) 
__A, + (__C << 2) | __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_getmant_ps (__m256 __W, __mmask8 __U, __m256 __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256) __builtin_ia32_getmantps256_mask ((__v8sf) __A, + (__C << 2) | __B, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_getmant_ps (__mmask8 __U, __m256 __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256) __builtin_ia32_getmantps256_mask ((__v8sf) __A, + (__C << 2) | __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_ps (__m128 __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128) __builtin_ia32_getmantps128_mask ((__v4sf) __A, + (__C << 2) | __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_ps (__m128 __W, __mmask8 __U, __m128 __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128) __builtin_ia32_getmantps128_mask ((__v4sf) __A, + (__C << 2) | __B, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_ps (__mmask8 __U, __m128 __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128) __builtin_ia32_getmantps128_mask ((__v4sf) __A, + (__C << 2) | __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_getmant_pd (__m256d __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return 
(__m256d) __builtin_ia32_getmantpd256_mask ((__v4df) __A, + (__C << 2) | __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_getmant_pd (__m256d __W, __mmask8 __U, __m256d __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256d) __builtin_ia32_getmantpd256_mask ((__v4df) __A, + (__C << 2) | __B, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_getmant_pd (__mmask8 __U, __m256d __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m256d) __builtin_ia32_getmantpd256_mask ((__v4df) __A, + (__C << 2) | __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getmant_pd (__m128d __A, _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128d) __builtin_ia32_getmantpd128_mask ((__v2df) __A, + (__C << 2) | __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_getmant_pd (__m128d __W, __mmask8 __U, __m128d __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128d) __builtin_ia32_getmantpd128_mask ((__v2df) __A, + (__C << 2) | __B, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_getmant_pd (__mmask8 __U, __m128d __A, + _MM_MANTISSA_NORM_ENUM __B, + _MM_MANTISSA_SIGN_ENUM __C) +{ + return (__m128d) __builtin_ia32_getmantpd128_mask ((__v2df) __A, + (__C << 2) | __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i32gather_ps (__m256 
__v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m256) __builtin_ia32_gather3siv8sf ((__v8sf) __v1_old, + __addr, + (__v8si) __index, + __mask, __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i32gather_ps (__m128 __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128) __builtin_ia32_gather3siv4sf ((__v4sf) __v1_old, + __addr, + (__v4si) __index, + __mask, __scale); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i32gather_pd (__m256d __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m256d) __builtin_ia32_gather3siv4df ((__v4df) __v1_old, + __addr, + (__v4si) __index, + __mask, __scale); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i32gather_pd (__m128d __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128d) __builtin_ia32_gather3siv2df ((__v2df) __v1_old, + __addr, + (__v4si) __index, + __mask, __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i64gather_ps (__m128 __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m128) __builtin_ia32_gather3div8sf ((__v4sf) __v1_old, + __addr, + (__v4di) __index, + __mask, __scale); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i64gather_ps (__m128 __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128) __builtin_ia32_gather3div4sf ((__v4sf) __v1_old, + __addr, + (__v2di) __index, + __mask, __scale); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i64gather_pd 
(__m256d __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m256d) __builtin_ia32_gather3div4df ((__v4df) __v1_old, + __addr, + (__v4di) __index, + __mask, __scale); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i64gather_pd (__m128d __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128d) __builtin_ia32_gather3div2df ((__v2df) __v1_old, + __addr, + (__v2di) __index, + __mask, __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i32gather_epi32 (__m256i __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m256i) __builtin_ia32_gather3siv8si ((__v8si) __v1_old, + __addr, + (__v8si) __index, + __mask, __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i32gather_epi32 (__m128i __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128i) __builtin_ia32_gather3siv4si ((__v4si) __v1_old, + __addr, + (__v4si) __index, + __mask, __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i32gather_epi64 (__m256i __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m256i) __builtin_ia32_gather3siv4di ((__v4di) __v1_old, + __addr, + (__v4si) __index, + __mask, __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i32gather_epi64 (__m128i __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128i) __builtin_ia32_gather3siv2di ((__v2di) __v1_old, + __addr, + (__v4si) __index, + __mask, __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm256_mmask_i64gather_epi32 (__m128i __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m128i) __builtin_ia32_gather3div8si ((__v4si) __v1_old, + __addr, + (__v4di) __index, + __mask, __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i64gather_epi32 (__m128i __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128i) __builtin_ia32_gather3div4si ((__v4si) __v1_old, + __addr, + (__v2di) __index, + __mask, __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mmask_i64gather_epi64 (__m256i __v1_old, __mmask8 __mask, + __m256i __index, void const *__addr, + int __scale) +{ + return (__m256i) __builtin_ia32_gather3div4di ((__v4di) __v1_old, + __addr, + (__v4di) __index, + __mask, __scale); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mmask_i64gather_epi64 (__m128i __v1_old, __mmask8 __mask, + __m128i __index, void const *__addr, + int __scale) +{ + return (__m128i) __builtin_ia32_gather3div2di ((__v2di) __v1_old, + __addr, + (__v2di) __index, + __mask, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32scatter_ps (void *__addr, __m256i __index, + __m256 __v1, const int __scale) +{ + __builtin_ia32_scattersiv8sf (__addr, (__mmask8) 0xFF, + (__v8si) __index, (__v8sf) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32scatter_ps (void *__addr, __mmask8 __mask, + __m256i __index, __m256 __v1, + const int __scale) +{ + __builtin_ia32_scattersiv8sf (__addr, __mask, (__v8si) __index, + (__v8sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32scatter_ps (void *__addr, __m128i __index, 
__m128 __v1, + const int __scale) +{ + __builtin_ia32_scattersiv4sf (__addr, (__mmask8) 0xFF, + (__v4si) __index, (__v4sf) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32scatter_ps (void *__addr, __mmask8 __mask, + __m128i __index, __m128 __v1, + const int __scale) +{ + __builtin_ia32_scattersiv4sf (__addr, __mask, (__v4si) __index, + (__v4sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32scatter_pd (void *__addr, __m128i __index, + __m256d __v1, const int __scale) +{ + __builtin_ia32_scattersiv4df (__addr, (__mmask8) 0xFF, + (__v4si) __index, (__v4df) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32scatter_pd (void *__addr, __mmask8 __mask, + __m128i __index, __m256d __v1, + const int __scale) +{ + __builtin_ia32_scattersiv4df (__addr, __mask, (__v4si) __index, + (__v4df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32scatter_pd (void *__addr, __m128i __index, + __m128d __v1, const int __scale) +{ + __builtin_ia32_scattersiv2df (__addr, (__mmask8) 0xFF, + (__v4si) __index, (__v2df) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32scatter_pd (void *__addr, __mmask8 __mask, + __m128i __index, __m128d __v1, + const int __scale) +{ + __builtin_ia32_scattersiv2df (__addr, __mask, (__v4si) __index, + (__v2df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64scatter_ps (void *__addr, __m256i __index, + __m128 __v1, const int __scale) +{ + __builtin_ia32_scatterdiv8sf (__addr, (__mmask8) 0xFF, + (__v4di) __index, (__v4sf) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_mask_i64scatter_ps (void *__addr, __mmask8 __mask, + __m256i __index, __m128 __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv8sf (__addr, __mask, (__v4di) __index, + (__v4sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64scatter_ps (void *__addr, __m128i __index, __m128 __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv4sf (__addr, (__mmask8) 0xFF, + (__v2di) __index, (__v4sf) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64scatter_ps (void *__addr, __mmask8 __mask, + __m128i __index, __m128 __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv4sf (__addr, __mask, (__v2di) __index, + (__v4sf) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64scatter_pd (void *__addr, __m256i __index, + __m256d __v1, const int __scale) +{ + __builtin_ia32_scatterdiv4df (__addr, (__mmask8) 0xFF, + (__v4di) __index, (__v4df) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i64scatter_pd (void *__addr, __mmask8 __mask, + __m256i __index, __m256d __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv4df (__addr, __mask, (__v4di) __index, + (__v4df) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64scatter_pd (void *__addr, __m128i __index, + __m128d __v1, const int __scale) +{ + __builtin_ia32_scatterdiv2df (__addr, (__mmask8) 0xFF, + (__v2di) __index, (__v2df) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64scatter_pd (void *__addr, __mmask8 __mask, + __m128i __index, __m128d __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv2df (__addr, __mask, (__v2di) __index, + (__v2df) __v1, 
__scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32scatter_epi32 (void *__addr, __m256i __index, + __m256i __v1, const int __scale) +{ + __builtin_ia32_scattersiv8si (__addr, (__mmask8) 0xFF, + (__v8si) __index, (__v8si) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32scatter_epi32 (void *__addr, __mmask8 __mask, + __m256i __index, __m256i __v1, + const int __scale) +{ + __builtin_ia32_scattersiv8si (__addr, __mask, (__v8si) __index, + (__v8si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32scatter_epi32 (void *__addr, __m128i __index, + __m128i __v1, const int __scale) +{ + __builtin_ia32_scattersiv4si (__addr, (__mmask8) 0xFF, + (__v4si) __index, (__v4si) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32scatter_epi32 (void *__addr, __mmask8 __mask, + __m128i __index, __m128i __v1, + const int __scale) +{ + __builtin_ia32_scattersiv4si (__addr, __mask, (__v4si) __index, + (__v4si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i32scatter_epi64 (void *__addr, __m128i __index, + __m256i __v1, const int __scale) +{ + __builtin_ia32_scattersiv4di (__addr, (__mmask8) 0xFF, + (__v4si) __index, (__v4di) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i32scatter_epi64 (void *__addr, __mmask8 __mask, + __m128i __index, __m256i __v1, + const int __scale) +{ + __builtin_ia32_scattersiv4di (__addr, __mask, (__v4si) __index, + (__v4di) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i32scatter_epi64 (void *__addr, __m128i __index, + __m128i __v1, const int __scale) 
+{ + __builtin_ia32_scattersiv2di (__addr, (__mmask8) 0xFF, + (__v4si) __index, (__v2di) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i32scatter_epi64 (void *__addr, __mmask8 __mask, + __m128i __index, __m128i __v1, + const int __scale) +{ + __builtin_ia32_scattersiv2di (__addr, __mask, (__v4si) __index, + (__v2di) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64scatter_epi32 (void *__addr, __m256i __index, + __m128i __v1, const int __scale) +{ + __builtin_ia32_scatterdiv8si (__addr, (__mmask8) 0xFF, + (__v4di) __index, (__v4si) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_i64scatter_epi32 (void *__addr, __mmask8 __mask, + __m256i __index, __m128i __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv8si (__addr, __mask, (__v4di) __index, + (__v4si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64scatter_epi32 (void *__addr, __m128i __index, + __m128i __v1, const int __scale) +{ + __builtin_ia32_scatterdiv4si (__addr, (__mmask8) 0xFF, + (__v2di) __index, (__v4si) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64scatter_epi32 (void *__addr, __mmask8 __mask, + __m128i __index, __m128i __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv4si (__addr, __mask, (__v2di) __index, + (__v4si) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_i64scatter_epi64 (void *__addr, __m256i __index, + __m256i __v1, const int __scale) +{ + __builtin_ia32_scatterdiv4di (__addr, (__mmask8) 0xFF, + (__v4di) __index, (__v4di) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_mask_i64scatter_epi64 (void *__addr, __mmask8 __mask, + __m256i __index, __m256i __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv4di (__addr, __mask, (__v4di) __index, + (__v4di) __v1, __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_i64scatter_epi64 (void *__addr, __m128i __index, + __m128i __v1, const int __scale) +{ + __builtin_ia32_scatterdiv2di (__addr, (__mmask8) 0xFF, + (__v2di) __index, (__v2di) __v1, + __scale); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_i64scatter_epi64 (void *__addr, __mmask8 __mask, + __m128i __index, __m128i __v1, + const int __scale) +{ + __builtin_ia32_scatterdiv2di (__addr, __mask, (__v2di) __index, + (__v2di) __v1, __scale); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_shuffle_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + _MM_PERM_ENUM __mask) +{ + return (__m256i) __builtin_ia32_pshufd256_mask ((__v8si) __A, __mask, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_shuffle_epi32 (__mmask8 __U, __m256i __A, + _MM_PERM_ENUM __mask) +{ + return (__m256i) __builtin_ia32_pshufd256_mask ((__v8si) __A, __mask, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_shuffle_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + _MM_PERM_ENUM __mask) +{ + return (__m128i) __builtin_ia32_pshufd128_mask ((__v4si) __A, __mask, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_shuffle_epi32 (__mmask8 __U, __m128i __A, + _MM_PERM_ENUM __mask) +{ + return (__m128i) __builtin_ia32_pshufd128_mask ((__v4si) __A, __mask, + (__v4si) + 
_mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rol_epi32 (__m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prold256_mask ((__v8si) __A, __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rol_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + const int __B) +{ + return (__m256i) __builtin_ia32_prold256_mask ((__v8si) __A, __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rol_epi32 (__mmask8 __U, __m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prold256_mask ((__v8si) __A, __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rol_epi32 (__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prold128_mask ((__v4si) __A, __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rol_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + const int __B) +{ + return (__m128i) __builtin_ia32_prold128_mask ((__v4si) __A, __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rol_epi32 (__mmask8 __U, __m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prold128_mask ((__v4si) __A, __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_ror_epi32 (__m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prord256_mask ((__v8si) __A, __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) 
-1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_ror_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + const int __B) +{ + return (__m256i) __builtin_ia32_prord256_mask ((__v8si) __A, __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_ror_epi32 (__mmask8 __U, __m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prord256_mask ((__v8si) __A, __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ror_epi32 (__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prord128_mask ((__v4si) __A, __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_ror_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + const int __B) +{ + return (__m128i) __builtin_ia32_prord128_mask ((__v4si) __A, __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_ror_epi32 (__mmask8 __U, __m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prord128_mask ((__v4si) __A, __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rol_epi64 (__m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prolq256_mask ((__v4di) __A, __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_rol_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + const int __B) +{ + return (__m256i) __builtin_ia32_prolq256_mask ((__v4di) __A, __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern 
__inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_rol_epi64 (__mmask8 __U, __m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prolq256_mask ((__v4di) __A, __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rol_epi64 (__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prolq128_mask ((__v2di) __A, __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_rol_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + const int __B) +{ + return (__m128i) __builtin_ia32_prolq128_mask ((__v2di) __A, __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_rol_epi64 (__mmask8 __U, __m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prolq128_mask ((__v2di) __A, __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_ror_epi64 (__m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prorq256_mask ((__v4di) __A, __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_ror_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + const int __B) +{ + return (__m256i) __builtin_ia32_prorq256_mask ((__v4di) __A, __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_ror_epi64 (__mmask8 __U, __m256i __A, const int __B) +{ + return (__m256i) __builtin_ia32_prorq256_mask ((__v4di) __A, __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline 
__m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ror_epi64 (__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prorq128_mask ((__v2di) __A, __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_ror_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + const int __B) +{ + return (__m128i) __builtin_ia32_prorq128_mask ((__v2di) __A, __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_ror_epi64 (__mmask8 __U, __m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_prorq128_mask ((__v2di) __A, __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_alignr_epi32 (__m128i __A, __m128i __B, const int __imm) +{ + return (__m128i) __builtin_ia32_alignd128_mask ((__v4si) __A, + (__v4si) __B, __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_alignr_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B, const int __imm) +{ + return (__m128i) __builtin_ia32_alignd128_mask ((__v4si) __A, + (__v4si) __B, __imm, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_alignr_epi32 (__mmask8 __U, __m128i __A, __m128i __B, + const int __imm) +{ + return (__m128i) __builtin_ia32_alignd128_mask ((__v4si) __A, + (__v4si) __B, __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_alignr_epi64 (__m128i __A, __m128i __B, const int __imm) +{ + return (__m128i) __builtin_ia32_alignq128_mask ((__v2di) __A, + 
(__v2di) __B, __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_alignr_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B, const int __imm) +{ + return (__m128i) __builtin_ia32_alignq128_mask ((__v2di) __A, + (__v2di) __B, __imm, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_alignr_epi64 (__mmask8 __U, __m128i __A, __m128i __B, + const int __imm) +{ + return (__m128i) __builtin_ia32_alignq128_mask ((__v2di) __A, + (__v2di) __B, __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_alignr_epi32 (__m256i __A, __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_alignd256_mask ((__v8si) __A, + (__v8si) __B, __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_alignr_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_alignd256_mask ((__v8si) __A, + (__v8si) __B, __imm, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_alignr_epi32 (__mmask8 __U, __m256i __A, __m256i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_alignd256_mask ((__v8si) __A, + (__v8si) __B, __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_alignr_epi64 (__m256i __A, __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_alignq256_mask ((__v4di) __A, + (__v4di) __B, __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} 
+ +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_alignr_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B, const int __imm) +{ + return (__m256i) __builtin_ia32_alignq256_mask ((__v4di) __A, + (__v4di) __B, __imm, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_alignr_epi64 (__mmask8 __U, __m256i __A, __m256i __B, + const int __imm) +{ + return (__m256i) __builtin_ia32_alignq256_mask ((__v4di) __A, + (__v4di) __B, __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A, + const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, __I, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A, const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, __I, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A, + const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, __I, + (__v8hi) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_cvtps_ph (__mmask8 __U, __m256 __A, const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, __I, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srai_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + const int __imm) +{ + return 
(__m256i) __builtin_ia32_psradi256_mask ((__v8si) __A, __imm, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srai_epi32 (__mmask8 __U, __m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psradi256_mask ((__v8si) __A, __imm, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srai_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_psradi128_mask ((__v4si) __A, __imm, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srai_epi32 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_psradi128_mask ((__v4si) __A, __imm, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_srai_epi64 (__m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psraqi256_mask ((__v4di) __A, __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_srai_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + const int __imm) +{ + return (__m256i) __builtin_ia32_psraqi256_mask ((__v4di) __A, __imm, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_srai_epi64 (__mmask8 __U, __m256i __A, const int __imm) +{ + return (__m256i) __builtin_ia32_psraqi256_mask ((__v4di) __A, __imm, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi64 (__m128i __A, const int __imm) +{ 
+ return (__m128i) __builtin_ia32_psraqi128_mask ((__v2di) __A, __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_srai_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + const int __imm) +{ + return (__m128i) __builtin_ia32_psraqi128_mask ((__v2di) __A, __imm, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_srai_epi64 (__mmask8 __U, __m128i __A, const int __imm) +{ + return (__m128i) __builtin_ia32_psraqi128_mask ((__v2di) __A, __imm, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_slli_epi32 (__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i) __builtin_ia32_pslldi128_mask ((__v4si) __A, __B, + (__v4si) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_slli_epi32 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i) __builtin_ia32_pslldi128_mask ((__v4si) __A, __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_slli_epi64 (__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i) __builtin_ia32_psllqi128_mask ((__v2di) __A, __B, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_slli_epi64 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i) __builtin_ia32_psllqi128_mask ((__v2di) __A, __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_slli_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + int 
__B) +{ + return (__m256i) __builtin_ia32_pslldi256_mask ((__v8si) __A, __B, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_slli_epi32 (__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i) __builtin_ia32_pslldi256_mask ((__v8si) __A, __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_slli_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + int __B) +{ + return (__m256i) __builtin_ia32_psllqi256_mask ((__v4di) __A, __B, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_slli_epi64 (__mmask8 __U, __m256i __A, int __B) +{ + return (__m256i) __builtin_ia32_psllqi256_mask ((__v4di) __A, __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permutex_pd (__m256d __W, __mmask8 __U, __m256d __X, + const int __imm) +{ + return (__m256d) __builtin_ia32_permdf256_mask ((__v4df) __X, __imm, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permutex_pd (__mmask8 __U, __m256d __X, const int __imm) +{ + return (__m256d) __builtin_ia32_permdf256_mask ((__v4df) __X, __imm, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permute_pd (__m256d __W, __mmask8 __U, __m256d __X, + const int __C) +{ + return (__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df) __X, __C, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permute_pd (__mmask8 __U, 
__m256d __X, const int __C) +{ + return (__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df) __X, __C, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permute_pd (__m128d __W, __mmask8 __U, __m128d __X, + const int __C) +{ + return (__m128d) __builtin_ia32_vpermilpd_mask ((__v2df) __X, __C, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permute_pd (__mmask8 __U, __m128d __X, const int __C) +{ + return (__m128d) __builtin_ia32_vpermilpd_mask ((__v2df) __X, __C, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_permute_ps (__m256 __W, __mmask8 __U, __m256 __X, + const int __C) +{ + return (__m256) __builtin_ia32_vpermilps256_mask ((__v8sf) __X, __C, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_permute_ps (__mmask8 __U, __m256 __X, const int __C) +{ + return (__m256) __builtin_ia32_vpermilps256_mask ((__v8sf) __X, __C, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_permute_ps (__m128 __W, __mmask8 __U, __m128 __X, + const int __C) +{ + return (__m128) __builtin_ia32_vpermilps_mask ((__v4sf) __X, __C, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_permute_ps (__mmask8 __U, __m128 __X, const int __C) +{ + return (__m128) __builtin_ia32_vpermilps_mask ((__v4sf) __X, __C, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_pd 
(__mmask8 __U, __m256d __A, __m256d __W) +{ + return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +extern __inline __m256 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) +{ + return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +extern __inline __m128d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) +{ + return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +extern __inline __m128 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) +{ + return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) 
__U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epi64_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epi32_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epu64_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_epu32_mask (__m256i __X, __m256i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_pd_mask (__m256d __X, __m256d __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmppd256_mask ((__v4df) __X, + (__v4df) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_ps_mask (__m256 __X, __m256 __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf) __X, + (__v8sf) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epi64_mask (__mmask8 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpq256_mask ((__v4di) __X, + (__v4di) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm256_mask_cmp_epi32_mask (__mmask8 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpd256_mask ((__v8si) __X, + (__v8si) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epu64_mask (__mmask8 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di) __X, + (__v4di) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_epu32_mask (__mmask8 __U, __m256i __X, __m256i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si) __X, + (__v8si) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_pd_mask (__mmask8 __U, __m256d __X, __m256d __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmppd256_mask ((__v4df) __X, + (__v4df) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_cmp_ps_mask (__mmask8 __U, __m256 __X, __m256 __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf) __X, + (__v8sf) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epi64_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epi32_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_cmp_epu64_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_epu32_mask (__m128i __X, __m128i __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_pd_mask (__m128d __X, __m128d __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmppd128_mask ((__v2df) __X, + (__v2df) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ps_mask (__m128 __X, __m128 __Y, const int __P) +{ + return (__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf) __X, + (__v4sf) __Y, __P, + (__mmask8) -1); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epi64_mask (__mmask8 __U, __m128i __X, __m128i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpq128_mask ((__v2di) __X, + (__v2di) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epi32_mask (__mmask8 __U, __m128i __X, __m128i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpd128_mask ((__v4si) __X, + (__v4si) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epu64_mask (__mmask8 __U, __m128i __X, __m128i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di) __X, + (__v2di) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_epu32_mask (__mmask8 __U, __m128i 
__X, __m128i __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si) __X, + (__v4si) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_pd_mask (__mmask8 __U, __m128d __X, __m128d __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmppd128_mask ((__v2df) __X, + (__v2df) __Y, __P, + (__mmask8) __U); +} + +extern __inline __mmask8 +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_cmp_ps_mask (__mmask8 __U, __m128 __X, __m128 __Y, + const int __P) +{ + return (__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf) __X, + (__v4sf) __Y, __P, + (__mmask8) __U); +} + +extern __inline __m256d +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutex_pd (__m256d __X, const int __M) +{ + return (__m256d) __builtin_ia32_permdf256_mask ((__v4df) __X, __M, + (__v4df) + _mm256_undefined_pd (), + (__mmask8) -1); +} + +#else +#define _mm256_permutex_pd(X, M) \ + ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(X), (int)(M), \ + (__v4df)(__m256d) \ + _mm256_undefined_pd (), \ + (__mmask8)-1)) + +#define _mm256_permutex_epi64(X, I) \ + ((__m256i) __builtin_ia32_permdi256_mask ((__v4di)(__m256i)(X), \ + (int)(I), \ + (__v4di)(__m256i) \ + (_mm256_setzero_si256 ()),\ + (__mmask8) -1)) + +#define _mm256_maskz_permutex_epi64(M, X, I) \ + ((__m256i) __builtin_ia32_permdi256_mask ((__v4di)(__m256i)(X), \ + (int)(I), \ + (__v4di)(__m256i) \ + (_mm256_setzero_si256 ()),\ + (__mmask8)(M))) + +#define _mm256_mask_permutex_epi64(W, M, X, I) \ + ((__m256i) __builtin_ia32_permdi256_mask ((__v4di)(__m256i)(X), \ + (int)(I), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(M))) + +#define _mm256_insertf32x4(X, Y, C) \ + ((__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf)(__m256) (X), \ + (__v4sf)(__m128) (Y), (int) (C), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)-1)) + +#define 
_mm256_mask_insertf32x4(W, U, X, Y, C) \ + ((__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf)(__m256) (X), \ + (__v4sf)(__m128) (Y), (int) (C), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_insertf32x4(U, X, Y, C) \ + ((__m256) __builtin_ia32_insertf32x4_256_mask ((__v8sf)(__m256) (X), \ + (__v4sf)(__m128) (Y), (int) (C), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm256_inserti32x4(X, Y, C) \ + ((__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si)(__m256i) (X),\ + (__v4si)(__m128i) (Y), (int) (C), \ + (__v8si)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)-1)) + +#define _mm256_mask_inserti32x4(W, U, X, Y, C) \ + ((__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si)(__m256i) (X),\ + (__v4si)(__m128i) (Y), (int) (C), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_inserti32x4(U, X, Y, C) \ + ((__m256i) __builtin_ia32_inserti32x4_256_mask ((__v8si)(__m256i) (X),\ + (__v4si)(__m128i) (Y), (int) (C), \ + (__v8si)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm256_extractf32x4_ps(X, C) \ + ((__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf)(__m256) (X), \ + (int) (C), \ + (__v4sf)(__m128)_mm_setzero_ps (), \ + (__mmask8)-1)) + +#define _mm256_mask_extractf32x4_ps(W, U, X, C) \ + ((__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf)(__m256) (X), \ + (int) (C), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_extractf32x4_ps(U, X, C) \ + ((__m128) __builtin_ia32_extractf32x4_256_mask ((__v8sf)(__m256) (X), \ + (int) (C), \ + (__v4sf)(__m128)_mm_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm256_extracti32x4_epi32(X, C) \ + ((__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si)(__m256i) (X),\ + (int) (C), (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)-1)) + +#define _mm256_mask_extracti32x4_epi32(W, U, X, C) \ + ((__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si)(__m256i) (X),\ + (int) (C), (__v4si)(__m128i)(W), 
(__mmask8)(U))) + +#define _mm256_maskz_extracti32x4_epi32(U, X, C) \ + ((__m128i) __builtin_ia32_extracti32x4_256_mask ((__v8si)(__m256i) (X),\ + (int) (C), (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)(U))) + +#define _mm256_shuffle_i64x2(X, Y, C) \ + ((__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(C), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)-1)) + +#define _mm256_mask_shuffle_i64x2(W, U, X, Y, C) \ + ((__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(C), \ + (__v4di)(__m256i)(W),\ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_i64x2(U, X, Y, C) \ + ((__m256i) __builtin_ia32_shuf_i64x2_256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(C), \ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm256_shuffle_i32x4(X, Y, C) \ + ((__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(C), \ + (__v8si)(__m256i) \ + _mm256_setzero_si256 (), \ + (__mmask8)-1)) + +#define _mm256_mask_shuffle_i32x4(W, U, X, Y, C) \ + ((__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(C), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_i32x4(U, X, Y, C) \ + ((__m256i) __builtin_ia32_shuf_i32x4_256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(C), \ + (__v8si)(__m256i) \ + _mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm256_shuffle_f64x2(X, Y, C) \ + ((__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(C), \ + (__v4df)(__m256d)_mm256_setzero_pd (),\ + (__mmask8)-1)) + +#define _mm256_mask_shuffle_f64x2(W, U, X, Y, C) \ + ((__m256d) __builtin_ia32_shuf_f64x2_256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_f64x2(U, X, Y, C) \ + ((__m256d) 
__builtin_ia32_shuf_f64x2_256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(C), \ + (__v4df)(__m256d)_mm256_setzero_pd( ),\ + (__mmask8)(U))) + +#define _mm256_shuffle_f32x4(X, Y, C) \ + ((__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(C), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)-1)) + +#define _mm256_mask_shuffle_f32x4(W, U, X, Y, C) \ + ((__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(C), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_f32x4(U, X, Y, C) \ + ((__m256) __builtin_ia32_shuf_f32x4_256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(C), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm256_mask_shuffle_pd(W, U, A, B, C) \ + ((__m256d)__builtin_ia32_shufpd256_mask ((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_pd(U, A, B, C) \ + ((__m256d)__builtin_ia32_shufpd256_mask ((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)(__m256d) \ + _mm256_setzero_pd (), \ + (__mmask8)(U))) + +#define _mm_mask_shuffle_pd(W, U, A, B, C) \ + ((__m128d)__builtin_ia32_shufpd128_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_shuffle_pd(U, A, B, C) \ + ((__m128d)__builtin_ia32_shufpd128_mask ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)(__m128d)_mm_setzero_pd (), \ + (__mmask8)(U))) + +#define _mm256_mask_shuffle_ps(W, U, A, B, C) \ + ((__m256) __builtin_ia32_shufps256_mask ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_ps(U, A, B, C) \ + ((__m256) __builtin_ia32_shufps256_mask ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)(__m256)_mm256_setzero_ps (),\ + 
(__mmask8)(U))) + +#define _mm_mask_shuffle_ps(W, U, A, B, C) \ + ((__m128) __builtin_ia32_shufps128_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_shuffle_ps(U, A, B, C) \ + ((__m128) __builtin_ia32_shufps128_mask ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)(__m128)_mm_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm256_fixupimm_pd(X, Y, Z, C) \ + ((__m256d)__builtin_ia32_fixupimmpd256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256i)(Z), (int)(C), \ + (__mmask8)(-1))) + +#define _mm256_mask_fixupimm_pd(X, U, Y, Z, C) \ + ((__m256d)__builtin_ia32_fixupimmpd256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256i)(Z), (int)(C), \ + (__mmask8)(U))) + +#define _mm256_maskz_fixupimm_pd(U, X, Y, Z, C) \ + ((__m256d)__builtin_ia32_fixupimmpd256_maskz ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256i)(Z), (int)(C),\ + (__mmask8)(U))) + +#define _mm256_fixupimm_ps(X, Y, Z, C) \ + ((__m256)__builtin_ia32_fixupimmps256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256i)(Z), (int)(C), \ + (__mmask8)(-1))) + + +#define _mm256_mask_fixupimm_ps(X, U, Y, Z, C) \ + ((__m256)__builtin_ia32_fixupimmps256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256i)(Z), (int)(C), \ + (__mmask8)(U))) + +#define _mm256_maskz_fixupimm_ps(U, X, Y, Z, C) \ + ((__m256)__builtin_ia32_fixupimmps256_maskz ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256i)(Z), (int)(C),\ + (__mmask8)(U))) + +#define _mm_fixupimm_pd(X, Y, Z, C) \ + ((__m128d)__builtin_ia32_fixupimmpd128_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__v2di)(__m128i)(Z), (int)(C), \ + (__mmask8)(-1))) + + +#define _mm_mask_fixupimm_pd(X, U, Y, Z, C) \ + ((__m128d)__builtin_ia32_fixupimmpd128_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__v2di)(__m128i)(Z), (int)(C), \ + 
(__mmask8)(U))) + +#define _mm_maskz_fixupimm_pd(U, X, Y, Z, C) \ + ((__m128d)__builtin_ia32_fixupimmpd128_maskz ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__v2di)(__m128i)(Z), (int)(C),\ + (__mmask8)(U))) + +#define _mm_fixupimm_ps(X, Y, Z, C) \ + ((__m128)__builtin_ia32_fixupimmps128_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (__v4si)(__m128i)(Z), (int)(C), \ + (__mmask8)(-1))) + +#define _mm_mask_fixupimm_ps(X, U, Y, Z, C) \ + ((__m128)__builtin_ia32_fixupimmps128_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (__v4si)(__m128i)(Z), (int)(C),\ + (__mmask8)(U))) + +#define _mm_maskz_fixupimm_ps(U, X, Y, Z, C) \ + ((__m128)__builtin_ia32_fixupimmps128_maskz ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (__v4si)(__m128i)(Z), (int)(C),\ + (__mmask8)(U))) + +#define _mm256_mask_srli_epi32(W, U, A, B) \ + ((__m256i) __builtin_ia32_psrldi256_mask ((__v8si)(__m256i)(A), \ + (int)(B), (__v8si)(__m256i)(W), (__mmask8)(U))) + +#define _mm256_maskz_srli_epi32(U, A, B) \ + ((__m256i) __builtin_ia32_psrldi256_mask ((__v8si)(__m256i)(A), \ + (int)(B), (__v8si)_mm256_setzero_si256 (), (__mmask8)(U))) + +#define _mm_mask_srli_epi32(W, U, A, B) \ + ((__m128i) __builtin_ia32_psrldi128_mask ((__v4si)(__m128i)(A), \ + (int)(B), (__v4si)(__m128i)(W), (__mmask8)(U))) + +#define _mm_maskz_srli_epi32(U, A, B) \ + ((__m128i) __builtin_ia32_psrldi128_mask ((__v4si)(__m128i)(A), \ + (int)(B), (__v4si)_mm_setzero_si128 (), (__mmask8)(U))) + +#define _mm256_mask_srli_epi64(W, U, A, B) \ + ((__m256i) __builtin_ia32_psrlqi256_mask ((__v4di)(__m256i)(A), \ + (int)(B), (__v4di)(__m256i)(W), (__mmask8)(U))) + +#define _mm256_maskz_srli_epi64(U, A, B) \ + ((__m256i) __builtin_ia32_psrlqi256_mask ((__v4di)(__m256i)(A), \ + (int)(B), (__v4di)_mm256_setzero_si256 (), (__mmask8)(U))) + +#define _mm_mask_srli_epi64(W, U, A, B) \ + ((__m128i) __builtin_ia32_psrlqi128_mask ((__v2di)(__m128i)(A), \ + (int)(B), (__v2di)(__m128i)(W), (__mmask8)(U))) + +#define 
_mm_maskz_srli_epi64(U, A, B) \ + ((__m128i) __builtin_ia32_psrlqi128_mask ((__v2di)(__m128i)(A), \ + (int)(B), (__v2di)_mm_setzero_si128 (), (__mmask8)(U))) + +#define _mm256_mask_slli_epi32(W, U, X, C) \ + ((__m256i)__builtin_ia32_pslldi256_mask ((__v8si)(__m256i)(X), (int)(C),\ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_slli_epi32(U, X, C) \ + ((__m256i)__builtin_ia32_pslldi256_mask ((__v8si)(__m256i)(X), (int)(C),\ + (__v8si)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm256_mask_slli_epi64(W, U, X, C) \ + ((__m256i)__builtin_ia32_psllqi256_mask ((__v4di)(__m256i)(X), (int)(C),\ + (__v4di)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_slli_epi64(U, X, C) \ + ((__m256i)__builtin_ia32_psllqi256_mask ((__v4di)(__m256i)(X), (int)(C),\ + (__v4di)(__m256i)_mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm_mask_slli_epi32(W, U, X, C) \ + ((__m128i)__builtin_ia32_pslldi128_mask ((__v4si)(__m128i)(X), (int)(C),\ + (__v4si)(__m128i)(W),\ + (__mmask8)(U))) + +#define _mm_maskz_slli_epi32(U, X, C) \ + ((__m128i)__builtin_ia32_pslldi128_mask ((__v4si)(__m128i)(X), (int)(C),\ + (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm_mask_slli_epi64(W, U, X, C) \ + ((__m128i)__builtin_ia32_psllqi128_mask ((__v2di)(__m128i)(X), (int)(C),\ + (__v2di)(__m128i)(W),\ + (__mmask8)(U))) + +#define _mm_maskz_slli_epi64(U, X, C) \ + ((__m128i)__builtin_ia32_psllqi128_mask ((__v2di)(__m128i)(X), (int)(C),\ + (__v2di)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm256_ternarylogic_epi64(A, B, C, I) \ + ((__m256i) \ + __builtin_ia32_pternlogq256_mask ((__v4di) (__m256i) (A), \ + (__v4di) (__m256i) (B), \ + (__v4di) (__m256i) (C), \ + (unsigned char) (I), \ + (__mmask8) -1)) + +#define _mm256_mask_ternarylogic_epi64(A, U, B, C, I) \ + ((__m256i) \ + __builtin_ia32_pternlogq256_mask ((__v4di) (__m256i) (A), \ + (__v4di) (__m256i) (B), \ + (__v4di) (__m256i) (C), \ + (unsigned char) (I), \ + 
(__mmask8) (U))) + +#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, I) \ + ((__m256i) \ + __builtin_ia32_pternlogq256_maskz ((__v4di) (__m256i) (A), \ + (__v4di) (__m256i) (B), \ + (__v4di) (__m256i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm256_ternarylogic_epi32(A, B, C, I) \ + ((__m256i) \ + __builtin_ia32_pternlogd256_mask ((__v8si) (__m256i) (A), \ + (__v8si) (__m256i) (B), \ + (__v8si) (__m256i) (C), \ + (unsigned char) (I), \ + (__mmask8) -1)) + +#define _mm256_mask_ternarylogic_epi32(A, U, B, C, I) \ + ((__m256i) \ + __builtin_ia32_pternlogd256_mask ((__v8si) (__m256i) (A), \ + (__v8si) (__m256i) (B), \ + (__v8si) (__m256i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, I) \ + ((__m256i) \ + __builtin_ia32_pternlogd256_maskz ((__v8si) (__m256i) (A), \ + (__v8si) (__m256i) (B), \ + (__v8si) (__m256i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm_ternarylogic_epi64(A, B, C, I) \ + ((__m128i) \ + __builtin_ia32_pternlogq128_mask ((__v2di) (__m128i) (A), \ + (__v2di) (__m128i) (B), \ + (__v2di) (__m128i) (C), \ + (unsigned char) (I), \ + (__mmask8) -1)) + +#define _mm_mask_ternarylogic_epi64(A, U, B, C, I) \ + ((__m128i) \ + __builtin_ia32_pternlogq128_mask ((__v2di) (__m128i) (A), \ + (__v2di) (__m128i) (B), \ + (__v2di) (__m128i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm_maskz_ternarylogic_epi64(U, A, B, C, I) \ + ((__m128i) \ + __builtin_ia32_pternlogq128_maskz ((__v2di) (__m128i) (A), \ + (__v2di) (__m128i) (B), \ + (__v2di) (__m128i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm_ternarylogic_epi32(A, B, C, I) \ + ((__m128i) \ + __builtin_ia32_pternlogd128_mask ((__v4si) (__m128i) (A), \ + (__v4si) (__m128i) (B), \ + (__v4si) (__m128i) (C), \ + (unsigned char) (I), \ + (__mmask8) -1)) + +#define _mm_mask_ternarylogic_epi32(A, U, B, C, I) \ + ((__m128i) \ + __builtin_ia32_pternlogd128_mask ((__v4si) (__m128i) 
(A), \ + (__v4si) (__m128i) (B), \ + (__v4si) (__m128i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm_maskz_ternarylogic_epi32(U, A, B, C, I) \ + ((__m128i) \ + __builtin_ia32_pternlogd128_maskz ((__v4si) (__m128i) (A), \ + (__v4si) (__m128i) (B), \ + (__v4si) (__m128i) (C), \ + (unsigned char) (I), \ + (__mmask8) (U))) + +#define _mm256_roundscale_ps(A, B) \ + ((__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf)(__m256)(A), \ + (int)(B), (__v8sf)(__m256)_mm256_setzero_ps (), (__mmask8)-1)) + +#define _mm256_mask_roundscale_ps(W, U, A, B) \ + ((__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf)(__m256)(A), \ + (int)(B), (__v8sf)(__m256)(W), (__mmask8)(U))) + +#define _mm256_maskz_roundscale_ps(U, A, B) \ + ((__m256) __builtin_ia32_rndscaleps_256_mask ((__v8sf)(__m256)(A), \ + (int)(B), (__v8sf)(__m256)_mm256_setzero_ps (), (__mmask8)(U))) + +#define _mm256_roundscale_pd(A, B) \ + ((__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)(__m256d)_mm256_setzero_pd (), (__mmask8)-1)) + +#define _mm256_mask_roundscale_pd(W, U, A, B) \ + ((__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U))) + +#define _mm256_maskz_roundscale_pd(U, A, B) \ + ((__m256d) __builtin_ia32_rndscalepd_256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)(__m256d)_mm256_setzero_pd (), (__mmask8)(U))) + +#define _mm_roundscale_ps(A, B) \ + ((__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf)(__m128)(A), \ + (int)(B), (__v4sf)(__m128)_mm_setzero_ps (), (__mmask8)-1)) + +#define _mm_mask_roundscale_ps(W, U, A, B) \ + ((__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf)(__m128)(A), \ + (int)(B), (__v4sf)(__m128)(W), (__mmask8)(U))) + +#define _mm_maskz_roundscale_ps(U, A, B) \ + ((__m128) __builtin_ia32_rndscaleps_128_mask ((__v4sf)(__m128)(A), \ + (int)(B), (__v4sf)(__m128)_mm_setzero_ps (), (__mmask8)(U))) + +#define _mm_roundscale_pd(A, B) \ + ((__m128d) 
__builtin_ia32_rndscalepd_128_mask ((__v2df)(__m128d)(A), \ + (int)(B), (__v2df)(__m128d)_mm_setzero_pd (), (__mmask8)-1)) + +#define _mm_mask_roundscale_pd(W, U, A, B) \ + ((__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df)(__m128d)(A), \ + (int)(B), (__v2df)(__m128d)(W), (__mmask8)(U))) + +#define _mm_maskz_roundscale_pd(U, A, B) \ + ((__m128d) __builtin_ia32_rndscalepd_128_mask ((__v2df)(__m128d)(A), \ + (int)(B), (__v2df)(__m128d)_mm_setzero_pd (), (__mmask8)(U))) + +#define _mm256_getmant_ps(X, B, C) \ + ((__m256) __builtin_ia32_getmantps256_mask ((__v8sf)(__m256) (X), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)-1)) + +#define _mm256_mask_getmant_ps(W, U, X, B, C) \ + ((__m256) __builtin_ia32_getmantps256_mask ((__v8sf)(__m256) (X), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_getmant_ps(U, X, B, C) \ + ((__m256) __builtin_ia32_getmantps256_mask ((__v8sf)(__m256) (X), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm_getmant_ps(X, B, C) \ + ((__m128) __builtin_ia32_getmantps128_mask ((__v4sf)(__m128) (X), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)(__m128)_mm_setzero_ps (), \ + (__mmask8)-1)) + +#define _mm_mask_getmant_ps(W, U, X, B, C) \ + ((__m128) __builtin_ia32_getmantps128_mask ((__v4sf)(__m128) (X), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_getmant_ps(U, X, B, C) \ + ((__m128) __builtin_ia32_getmantps128_mask ((__v4sf)(__m128) (X), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)(__m128)_mm_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm256_getmant_pd(X, B, C) \ + ((__m256d) __builtin_ia32_getmantpd256_mask ((__v4df)(__m256d) (X), \ + (int)(((C)<<2) | (B)), \ + (__v4df)(__m256d)_mm256_setzero_pd (),\ + (__mmask8)-1)) + +#define _mm256_mask_getmant_pd(W, U, X, B, C) \ + ((__m256d) __builtin_ia32_getmantpd256_mask ((__v4df)(__m256d) (X), \ + (int)(((C)<<2) | (B)), \ + 
(__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_getmant_pd(U, X, B, C) \ + ((__m256d) __builtin_ia32_getmantpd256_mask ((__v4df)(__m256d) (X), \ + (int)(((C)<<2) | (B)), \ + (__v4df)(__m256d)_mm256_setzero_pd (),\ + (__mmask8)(U))) + +#define _mm_getmant_pd(X, B, C) \ + ((__m128d) __builtin_ia32_getmantpd128_mask ((__v2df)(__m128d) (X), \ + (int)(((C)<<2) | (B)), \ + (__v2df)(__m128d)_mm_setzero_pd (), \ + (__mmask8)-1)) + +#define _mm_mask_getmant_pd(W, U, X, B, C) \ + ((__m128d) __builtin_ia32_getmantpd128_mask ((__v2df)(__m128d) (X), \ + (int)(((C)<<2) | (B)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_getmant_pd(U, X, B, C) \ + ((__m128d) __builtin_ia32_getmantpd128_mask ((__v2df)(__m128d) (X), \ + (int)(((C)<<2) | (B)), \ + (__v2df)(__m128d)_mm_setzero_pd (), \ + (__mmask8)(U))) + +#define _mm256_mmask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256) __builtin_ia32_gather3siv8sf ((__v8sf)(__m256) (V1OLD), \ + (void const *) (ADDR), \ + (__v8si)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i32gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128) __builtin_ia32_gather3siv4sf ((__v4sf)(__m128) (V1OLD), \ + (void const *) (ADDR), \ + (__v4si)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256d) __builtin_ia32_gather3siv4df ((__v4df)(__m256d) (V1OLD), \ + (void const *) (ADDR), \ + (__v4si)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i32gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128d) __builtin_ia32_gather3siv2df ((__v2df)(__m128d) (V1OLD), \ + (void const *) (ADDR), \ + (__v4si)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128) __builtin_ia32_gather3div8sf ((__v4sf)(__m128) (V1OLD), \ + (void const *) (ADDR), \ + (__v4di)(__m256i) (INDEX), \ + 
(__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i64gather_ps(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128) __builtin_ia32_gather3div4sf ((__v4sf)(__m128) (V1OLD), \ + (void const *) (ADDR), \ + (__v2di)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256d) __builtin_ia32_gather3div4df ((__v4df)(__m256d) (V1OLD), \ + (void const *) (ADDR), \ + (__v4di)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i64gather_pd(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128d) __builtin_ia32_gather3div2df ((__v2df)(__m128d) (V1OLD), \ + (void const *) (ADDR), \ + (__v2di)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256i) __builtin_ia32_gather3siv8si ((__v8si)(__m256i) (V1OLD), \ + (void const *) (ADDR), \ + (__v8si)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i32gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128i) __builtin_ia32_gather3siv4si ((__v4si)(__m128i) (V1OLD), \ + (void const *) (ADDR), \ + (__v4si)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256i) __builtin_ia32_gather3siv4di ((__v4di)(__m256i) (V1OLD), \ + (void const *) (ADDR), \ + (__v4si)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i32gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128i) __builtin_ia32_gather3siv2di ((__v2di)(__m128i) (V1OLD), \ + (void const *) (ADDR), \ + (__v4si)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128i) __builtin_ia32_gather3div8si ((__v4si)(__m128i) (V1OLD), \ + (void const *) (ADDR), \ + (__v4di)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define 
_mm_mmask_i64gather_epi32(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128i) __builtin_ia32_gather3div4si ((__v4si)(__m128i) (V1OLD), \ + (void const *) (ADDR), \ + (__v2di)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_mmask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m256i) __builtin_ia32_gather3div4di ((__v4di)(__m256i) (V1OLD), \ + (void const *) (ADDR), \ + (__v4di)(__m256i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm_mmask_i64gather_epi64(V1OLD, MASK, INDEX, ADDR, SCALE) \ + (__m128i) __builtin_ia32_gather3div2di ((__v2di)(__m128i) (V1OLD), \ + (void const *) (ADDR), \ + (__v2di)(__m128i) (INDEX), \ + (__mmask8) (MASK), \ + (int) (SCALE)) + +#define _mm256_i32scatter_ps(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8sf ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8si)(__m256i) (INDEX), \ + (__v8sf)(__m256) (V1), (int) (SCALE)) + +#define _mm256_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8sf ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8si)(__m256i) (INDEX), \ + (__v8sf)(__m256) (V1), (int) (SCALE)) + +#define _mm_i32scatter_ps(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4sf ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4si)(__m128i) (INDEX), \ + (__v4sf)(__m128) (V1), (int) (SCALE)) + +#define _mm_mask_i32scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4sf ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4si)(__m128i) (INDEX), \ + (__v4sf)(__m128) (V1), (int) (SCALE)) + +#define _mm256_i32scatter_pd(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4df ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4si)(__m128i) (INDEX), \ + (__v4df)(__m256d) (V1), (int) (SCALE)) + +#define _mm256_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4df ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4si)(__m128i) (INDEX), \ + (__v4df)(__m256d) (V1), (int) (SCALE)) + +#define _mm_i32scatter_pd(ADDR, INDEX, V1, SCALE) \ + 
__builtin_ia32_scattersiv2df ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4si)(__m128i) (INDEX), \ + (__v2df)(__m128d) (V1), (int) (SCALE)) + +#define _mm_mask_i32scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv2df ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4si)(__m128i) (INDEX), \ + (__v2df)(__m128d) (V1), (int) (SCALE)) + +#define _mm256_i64scatter_ps(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8sf ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4di)(__m256i) (INDEX), \ + (__v4sf)(__m128) (V1), (int) (SCALE)) + +#define _mm256_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8sf ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4di)(__m256i) (INDEX), \ + (__v4sf)(__m128) (V1), (int) (SCALE)) + +#define _mm_i64scatter_ps(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4sf ((void *) (ADDR), (__mmask8)0xFF, \ + (__v2di)(__m128i) (INDEX), \ + (__v4sf)(__m128) (V1), (int) (SCALE)) + +#define _mm_mask_i64scatter_ps(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4sf ((void *) (ADDR), (__mmask8) (MASK), \ + (__v2di)(__m128i) (INDEX), \ + (__v4sf)(__m128) (V1), (int) (SCALE)) + +#define _mm256_i64scatter_pd(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4df ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4di)(__m256i) (INDEX), \ + (__v4df)(__m256d) (V1), (int) (SCALE)) + +#define _mm256_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4df ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4di)(__m256i) (INDEX), \ + (__v4df)(__m256d) (V1), (int) (SCALE)) + +#define _mm_i64scatter_pd(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv2df ((void *) (ADDR), (__mmask8)0xFF, \ + (__v2di)(__m128i) (INDEX), \ + (__v2df)(__m128d) (V1), (int) (SCALE)) + +#define _mm_mask_i64scatter_pd(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv2df ((void *) (ADDR), (__mmask8) (MASK), \ + (__v2di)(__m128i) (INDEX), \ + (__v2df)(__m128d) (V1), (int) (SCALE)) + +#define 
_mm256_i32scatter_epi32(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8si ((void *) (ADDR), (__mmask8)0xFF, \ + (__v8si)(__m256i) (INDEX), \ + (__v8si)(__m256i) (V1), (int) (SCALE)) + +#define _mm256_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv8si ((void *) (ADDR), (__mmask8) (MASK), \ + (__v8si)(__m256i) (INDEX), \ + (__v8si)(__m256i) (V1), (int) (SCALE)) + +#define _mm_i32scatter_epi32(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4si ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4si)(__m128i) (INDEX), \ + (__v4si)(__m128i) (V1), (int) (SCALE)) + +#define _mm_mask_i32scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4si ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4si)(__m128i) (INDEX), \ + (__v4si)(__m128i) (V1), (int) (SCALE)) + +#define _mm256_i32scatter_epi64(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4di ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4si)(__m128i) (INDEX), \ + (__v4di)(__m256i) (V1), (int) (SCALE)) + +#define _mm256_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv4di ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4si)(__m128i) (INDEX), \ + (__v4di)(__m256i) (V1), (int) (SCALE)) + +#define _mm_i32scatter_epi64(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv2di ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4si)(__m128i) (INDEX), \ + (__v2di)(__m128i) (V1), (int) (SCALE)) + +#define _mm_mask_i32scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scattersiv2di ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4si)(__m128i) (INDEX), \ + (__v2di)(__m128i) (V1), (int) (SCALE)) + +#define _mm256_i64scatter_epi32(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8si ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4di)(__m256i) (INDEX), \ + (__v4si)(__m128i) (V1), (int) (SCALE)) + +#define _mm256_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv8si ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4di)(__m256i) 
(INDEX), \ + (__v4si)(__m128i) (V1), (int) (SCALE)) + +#define _mm_i64scatter_epi32(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4si ((void *) (ADDR), (__mmask8)0xFF, \ + (__v2di)(__m128i) (INDEX), \ + (__v4si)(__m128i) (V1), (int) (SCALE)) + +#define _mm_mask_i64scatter_epi32(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4si ((void *) (ADDR), (__mmask8) (MASK), \ + (__v2di)(__m128i) (INDEX), \ + (__v4si)(__m128i) (V1), (int) (SCALE)) + +#define _mm256_i64scatter_epi64(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4di ((void *) (ADDR), (__mmask8)0xFF, \ + (__v4di)(__m256i) (INDEX), \ + (__v4di)(__m256i) (V1), (int) (SCALE)) + +#define _mm256_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv4di ((void *) (ADDR), (__mmask8) (MASK), \ + (__v4di)(__m256i) (INDEX), \ + (__v4di)(__m256i) (V1), (int) (SCALE)) + +#define _mm_i64scatter_epi64(ADDR, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv2di ((void *) (ADDR), (__mmask8)0xFF, \ + (__v2di)(__m128i) (INDEX), \ + (__v2di)(__m128i) (V1), (int) (SCALE)) + +#define _mm_mask_i64scatter_epi64(ADDR, MASK, INDEX, V1, SCALE) \ + __builtin_ia32_scatterdiv2di ((void *) (ADDR), (__mmask8) (MASK), \ + (__v2di)(__m128i) (INDEX), \ + (__v2di)(__m128i) (V1), (int) (SCALE)) + +#define _mm256_mask_shuffle_epi32(W, U, X, C) \ + ((__m256i) __builtin_ia32_pshufd256_mask ((__v8si)(__m256i)(X), (int)(C), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_shuffle_epi32(U, X, C) \ + ((__m256i) __builtin_ia32_pshufd256_mask ((__v8si)(__m256i)(X), (int)(C), \ + (__v8si)(__m256i) \ + _mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm_mask_shuffle_epi32(W, U, X, C) \ + ((__m128i) __builtin_ia32_pshufd128_mask ((__v4si)(__m128i)(X), (int)(C), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_shuffle_epi32(U, X, C) \ + ((__m128i) __builtin_ia32_pshufd128_mask ((__v4si)(__m128i)(X), (int)(C), \ + (__v4si)(__m128i)_mm_setzero_si128 (), \ + 
(__mmask8)(U))) + +#define _mm256_rol_epi64(A, B) \ + ((__m256i)__builtin_ia32_prolq256_mask ((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)-1)) + +#define _mm256_mask_rol_epi64(W, U, A, B) \ + ((__m256i)__builtin_ia32_prolq256_mask ((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_rol_epi64(U, A, B) \ + ((__m256i)__builtin_ia32_prolq256_mask ((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)(U))) + +#define _mm_rol_epi64(A, B) \ + ((__m128i)__builtin_ia32_prolq128_mask ((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)-1)) + +#define _mm_mask_rol_epi64(W, U, A, B) \ + ((__m128i)__builtin_ia32_prolq128_mask ((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_rol_epi64(U, A, B) \ + ((__m128i)__builtin_ia32_prolq128_mask ((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm256_ror_epi64(A, B) \ + ((__m256i)__builtin_ia32_prorq256_mask ((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)-1)) + +#define _mm256_mask_ror_epi64(W, U, A, B) \ + ((__m256i)__builtin_ia32_prorq256_mask ((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_ror_epi64(U, A, B) \ + ((__m256i)__builtin_ia32_prorq256_mask ((__v4di)(__m256i)(A), (int)(B), \ + (__v4di)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)(U))) + +#define _mm_ror_epi64(A, B) \ + ((__m128i)__builtin_ia32_prorq128_mask ((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)-1)) + +#define _mm_mask_ror_epi64(W, U, A, B) \ + ((__m128i)__builtin_ia32_prorq128_mask ((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_ror_epi64(U, A, B) \ + ((__m128i)__builtin_ia32_prorq128_mask 
((__v2di)(__m128i)(A), (int)(B), \ + (__v2di)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm256_rol_epi32(A, B) \ + ((__m256i)__builtin_ia32_prold256_mask ((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)-1)) + +#define _mm256_mask_rol_epi32(W, U, A, B) \ + ((__m256i)__builtin_ia32_prold256_mask ((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_rol_epi32(U, A, B) \ + ((__m256i)__builtin_ia32_prold256_mask ((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)(U))) + +#define _mm_rol_epi32(A, B) \ + ((__m128i)__builtin_ia32_prold128_mask ((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)-1)) + +#define _mm_mask_rol_epi32(W, U, A, B) \ + ((__m128i)__builtin_ia32_prold128_mask ((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_rol_epi32(U, A, B) \ + ((__m128i)__builtin_ia32_prold128_mask ((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm256_ror_epi32(A, B) \ + ((__m256i)__builtin_ia32_prord256_mask ((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)-1)) + +#define _mm256_mask_ror_epi32(W, U, A, B) \ + ((__m256i)__builtin_ia32_prord256_mask ((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_ror_epi32(U, A, B) \ + ((__m256i)__builtin_ia32_prord256_mask ((__v8si)(__m256i)(A), (int)(B), \ + (__v8si)(__m256i) \ + _mm256_setzero_si256 (), \ + (__mmask8)(U))) + +#define _mm_ror_epi32(A, B) \ + ((__m128i)__builtin_ia32_prord128_mask ((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)-1)) + +#define _mm_mask_ror_epi32(W, U, A, B) \ + ((__m128i)__builtin_ia32_prord128_mask ((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U))) + 
+#define _mm_maskz_ror_epi32(U, A, B) \ + ((__m128i)__builtin_ia32_prord128_mask ((__v4si)(__m128i)(A), (int)(B), \ + (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm256_alignr_epi32(X, Y, C) \ + ((__m256i)__builtin_ia32_alignd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(C), (__v8si)(__m256i)(X), (__mmask8)-1)) + +#define _mm256_mask_alignr_epi32(W, U, X, Y, C) \ + ((__m256i)__builtin_ia32_alignd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(C), (__v8si)(__m256i)(W), (__mmask8)(U))) + +#define _mm256_maskz_alignr_epi32(U, X, Y, C) \ + ((__m256i)__builtin_ia32_alignd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(C), (__v8si)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)(U))) + +#define _mm256_alignr_epi64(X, Y, C) \ + ((__m256i)__builtin_ia32_alignq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(C), (__v4di)(__m256i)(X), (__mmask8)-1)) + +#define _mm256_mask_alignr_epi64(W, U, X, Y, C) \ + ((__m256i)__builtin_ia32_alignq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(C), (__v4di)(__m256i)(W), (__mmask8)(U))) + +#define _mm256_maskz_alignr_epi64(U, X, Y, C) \ + ((__m256i)__builtin_ia32_alignq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(C), (__v4di)(__m256i)_mm256_setzero_si256 (),\ + (__mmask8)(U))) + +#define _mm_alignr_epi32(X, Y, C) \ + ((__m128i)__builtin_ia32_alignd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(C), (__v4si)(__m128i)(X), (__mmask8)-1)) + +#define _mm_mask_alignr_epi32(W, U, X, Y, C) \ + ((__m128i)__builtin_ia32_alignd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(C), (__v4si)(__m128i)(W), (__mmask8)(U))) + +#define _mm_maskz_alignr_epi32(U, X, Y, C) \ + ((__m128i)__builtin_ia32_alignd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(C), (__v4si)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm_alignr_epi64(X, Y, C) \ + ((__m128i)__builtin_ia32_alignq128_mask 
((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(X), (__mmask8)-1)) + +#define _mm_mask_alignr_epi64(W, U, X, Y, C) \ + ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(X), (__mmask8)-1)) + +#define _mm_maskz_alignr_epi64(U, X, Y, C) \ + ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)_mm_setzero_si128 (),\ + (__mmask8)(U))) + +#define _mm_mask_cvtps_ph(W, U, A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \ + (__v8hi)(__m128i) (W), (__mmask8) (U))) + +#define _mm_maskz_cvtps_ph(U, A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I), \ + (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U))) + +#define _mm256_mask_cvtps_ph(W, U, A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \ + (__v8hi)(__m128i) (W), (__mmask8) (U))) + +#define _mm256_maskz_cvtps_ph(U, A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I), \ + (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U))) + +#define _mm256_mask_srai_epi32(W, U, A, B) \ + ((__m256i) __builtin_ia32_psradi256_mask ((__v8si)(__m256i)(A), \ + (int)(B), (__v8si)(__m256i)(W), (__mmask8)(U))) + +#define _mm256_maskz_srai_epi32(U, A, B) \ + ((__m256i) __builtin_ia32_psradi256_mask ((__v8si)(__m256i)(A), \ + (int)(B), (__v8si)_mm256_setzero_si256 (), (__mmask8)(U))) + +#define _mm_mask_srai_epi32(W, U, A, B) \ + ((__m128i) __builtin_ia32_psradi128_mask ((__v4si)(__m128i)(A), \ + (int)(B), (__v4si)(__m128i)(W), (__mmask8)(U))) + +#define _mm_maskz_srai_epi32(U, A, B) \ + ((__m128i) __builtin_ia32_psradi128_mask ((__v4si)(__m128i)(A), \ + (int)(B), (__v4si)_mm_setzero_si128 (), (__mmask8)(U))) + +#define _mm256_srai_epi64(A, B) \ + ((__m256i) __builtin_ia32_psraqi256_mask ((__v4di)(__m256i)(A), \ + (int)(B), 
(__v4di)_mm256_setzero_si256 (), (__mmask8)-1)) + +#define _mm256_mask_srai_epi64(W, U, A, B) \ + ((__m256i) __builtin_ia32_psraqi256_mask ((__v4di)(__m256i)(A), \ + (int)(B), (__v4di)(__m256i)(W), (__mmask8)(U))) + +#define _mm256_maskz_srai_epi64(U, A, B) \ + ((__m256i) __builtin_ia32_psraqi256_mask ((__v4di)(__m256i)(A), \ + (int)(B), (__v4di)_mm256_setzero_si256 (), (__mmask8)(U))) + +#define _mm_srai_epi64(A, B) \ + ((__m128i) __builtin_ia32_psraqi128_mask ((__v2di)(__m128i)(A), \ + (int)(B), (__v2di)_mm_setzero_si128 (), (__mmask8)-1)) + +#define _mm_mask_srai_epi64(W, U, A, B) \ + ((__m128i) __builtin_ia32_psraqi128_mask ((__v2di)(__m128i)(A), \ + (int)(B), (__v2di)(__m128i)(W), (__mmask8)(U))) + +#define _mm_maskz_srai_epi64(U, A, B) \ + ((__m128i) __builtin_ia32_psraqi128_mask ((__v2di)(__m128i)(A), \ + (int)(B), (__v2di)_mm_setzero_si128 (), (__mmask8)(U))) + +#define _mm256_mask_permutex_pd(W, U, A, B) \ + ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)(__m256d)(W), (__mmask8)(U))) + +#define _mm256_maskz_permutex_pd(U, A, B) \ + ((__m256d) __builtin_ia32_permdf256_mask ((__v4df)(__m256d)(A), \ + (int)(B), (__v4df)(__m256d)_mm256_setzero_pd (), (__mmask8)(U))) + +#define _mm256_mask_permute_pd(W, U, X, C) \ + ((__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df)(__m256d)(X), (int)(C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_permute_pd(U, X, C) \ + ((__m256d) __builtin_ia32_vpermilpd256_mask ((__v4df)(__m256d)(X), (int)(C), \ + (__v4df)(__m256d)_mm256_setzero_pd (),\ + (__mmask8)(U))) + +#define _mm256_mask_permute_ps(W, U, X, C) \ + ((__m256) __builtin_ia32_vpermilps256_mask ((__v8sf)(__m256)(X), (int)(C), \ + (__v8sf)(__m256)(W), (__mmask8)(U))) + +#define _mm256_maskz_permute_ps(U, X, C) \ + ((__m256) __builtin_ia32_vpermilps256_mask ((__v8sf)(__m256)(X), (int)(C), \ + (__v8sf)(__m256)_mm256_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm_mask_permute_pd(W, U, X, C) \ + ((__m128d) 
__builtin_ia32_vpermilpd_mask ((__v2df)(__m128d)(X), (int)(C), \ + (__v2df)(__m128d)(W), (__mmask8)(U))) + +#define _mm_maskz_permute_pd(U, X, C) \ + ((__m128d) __builtin_ia32_vpermilpd_mask ((__v2df)(__m128d)(X), (int)(C), \ + (__v2df)(__m128d)_mm_setzero_pd (), \ + (__mmask8)(U))) + +#define _mm_mask_permute_ps(W, U, X, C) \ + ((__m128) __builtin_ia32_vpermilps_mask ((__v4sf)(__m128)(X), (int)(C), \ + (__v4sf)(__m128)(W), (__mmask8)(U))) + +#define _mm_maskz_permute_ps(U, X, C) \ + ((__m128) __builtin_ia32_vpermilps_mask ((__v4sf)(__m128)(X), (int)(C), \ + (__v4sf)(__m128)_mm_setzero_ps (), \ + (__mmask8)(U))) + +#define _mm256_mask_blend_pd(__U, __A, __W) \ + ((__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) (__A), \ + (__v4df) (__W), \ + (__mmask8) (__U))) + +#define _mm256_mask_blend_ps(__U, __A, __W) \ + ((__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) (__A), \ + (__v8sf) (__W), \ + (__mmask8) (__U))) + +#define _mm256_mask_blend_epi64(__U, __A, __W) \ + ((__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) (__A), \ + (__v4di) (__W), \ + (__mmask8) (__U))) + +#define _mm256_mask_blend_epi32(__U, __A, __W) \ + ((__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) (__A), \ + (__v8si) (__W), \ + (__mmask8) (__U))) + +#define _mm_mask_blend_pd(__U, __A, __W) \ + ((__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) (__A), \ + (__v2df) (__W), \ + (__mmask8) (__U))) + +#define _mm_mask_blend_ps(__U, __A, __W) \ + ((__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) (__A), \ + (__v4sf) (__W), \ + (__mmask8) (__U))) + +#define _mm_mask_blend_epi64(__U, __A, __W) \ + ((__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) (__A), \ + (__v2di) (__W), \ + (__mmask8) (__U))) + +#define _mm_mask_blend_epi32(__U, __A, __W) \ + ((__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) (__A), \ + (__v4si) (__W), \ + (__mmask8) (__U))) + +#define _mm256_cmp_epu32_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), 
(int)(P),\ + (__mmask8)-1)) + +#define _mm256_cmp_epi64_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm256_cmp_epi32_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm256_cmp_epu64_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm256_cmp_pd_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmppd256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm256_cmp_ps_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm256_mask_cmp_epi64_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm256_mask_cmp_epi32_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm256_mask_cmp_epu64_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpq256_mask ((__v4di)(__m256i)(X), \ + (__v4di)(__m256i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm256_mask_cmp_epu32_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpd256_mask ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm256_mask_cmp_pd_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmppd256_mask ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm256_mask_cmp_ps_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpps256_mask ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_cmp_epi64_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpq128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), 
(int)(P),\ + (__mmask8)-1)) + +#define _mm_cmp_epi32_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm_cmp_epu64_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm_cmp_epu32_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm_cmp_pd_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmppd128_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm_cmp_ps_mask(X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P),\ + (__mmask8)-1)) + +#define _mm_mask_cmp_epi64_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpq128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_epi32_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_epu64_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpq128_mask ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_epu32_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_ucmpd128_mask ((__v4si)(__m128i)(X), \ + (__v4si)(__m128i)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_pd_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmppd128_mask ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P),\ + (__mmask8)(M))) + +#define _mm_mask_cmp_ps_mask(M, X, Y, P) \ + ((__mmask8) __builtin_ia32_cmpps128_mask ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P),\ + (__mmask8)(M))) + +#endif + +#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps ((B), (A)) +#define _mm256_mask_cvt_roundps_ph(A, B, C, D) \ + _mm256_mask_cvtps_ph ((A), 
(B), (C), (D)) +#define _mm256_maskz_cvt_roundps_ph(A, B, C) \ + _mm256_maskz_cvtps_ph ((A), (B), (C)) +#define _mm_mask_cvt_roundps_ph(A, B, C, D) \ + _mm_mask_cvtps_ph ((A), (B), (C), (D)) +#define _mm_maskz_cvt_roundps_ph(A, B, C) _mm_maskz_cvtps_ph ((A), (B), (C)) + +#ifdef __DISABLE_AVX512VL__ +#undef __DISABLE_AVX512VL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VL__ */ + +#endif /* _AVX512VLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vnniintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vnniintrin.h new file mode 100644 index 0000000..8161927 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vnniintrin.h @@ -0,0 +1,144 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef __AVX512VNNIINTRIN_H_INCLUDED +#define __AVX512VNNIINTRIN_H_INCLUDED + +#if !defined(__AVX512VNNI__) +#pragma GCC push_options +#pragma GCC target("avx512vnni") +#define __DISABLE_AVX512VNNI__ +#endif /* __AVX512VNNI__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_dpbusd_epi32 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpdpbusd_v16si ((__v16si)__A, (__v16si) __B, + (__v16si) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_dpbusd_epi32 (__m512i __A, __mmask16 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpbusd_v16si_mask ((__v16si)__A, + (__v16si) __C, (__v16si) __D, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_dpbusd_epi32 (__mmask16 __A, __m512i __B, __m512i __C, + __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpbusd_v16si_maskz ((__v16si)__B, + (__v16si) __C, (__v16si) __D, (__mmask16)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_dpbusds_epi32 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpdpbusds_v16si ((__v16si)__A, (__v16si) __B, + (__v16si) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_dpbusds_epi32 (__m512i __A, __mmask16 __B, __m512i __C, + __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpbusds_v16si_mask ((__v16si)__A, + (__v16si) __C, (__v16si) __D, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_dpbusds_epi32 (__mmask16 __A, __m512i __B, __m512i __C, + __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpbusds_v16si_maskz ((__v16si)__B, + (__v16si) __C, (__v16si) __D, (__mmask16)__A); +} + +extern __inline __m512i 
+__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_dpwssd_epi32 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpdpwssd_v16si ((__v16si)__A, (__v16si) __B, + (__v16si) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_dpwssd_epi32 (__m512i __A, __mmask16 __B, __m512i __C, __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpwssd_v16si_mask ((__v16si)__A, + (__v16si) __C, (__v16si) __D, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_dpwssd_epi32 (__mmask16 __A, __m512i __B, __m512i __C, + __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpwssd_v16si_maskz ((__v16si)__B, + (__v16si) __C, (__v16si) __D, (__mmask16)__A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_dpwssds_epi32 (__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vpdpwssds_v16si ((__v16si)__A, (__v16si) __B, + (__v16si) __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_dpwssds_epi32 (__m512i __A, __mmask16 __B, __m512i __C, + __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpwssds_v16si_mask ((__v16si)__A, + (__v16si) __C, (__v16si) __D, (__mmask16)__B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_dpwssds_epi32 (__mmask16 __A, __m512i __B, __m512i __C, + __m512i __D) +{ + return (__m512i)__builtin_ia32_vpdpwssds_v16si_maskz ((__v16si)__B, + (__v16si) __C, (__v16si) __D, (__mmask16)__A); +} + +#ifdef __DISABLE_AVX512VNNI__ +#undef __DISABLE_AVX512VNNI__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VNNI__ */ + +#endif /* __AVX512VNNIINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vnnivlintrin.h 
b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vnnivlintrin.h new file mode 100644 index 0000000..03595a8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vnnivlintrin.h @@ -0,0 +1,210 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512VNNIVLINTRIN_H_INCLUDED +#define _AVX512VNNIVLINTRIN_H_INCLUDED + +#if !defined(__AVX512VL__) || !defined(__AVX512VNNI__) +#pragma GCC push_options +#pragma GCC target("avx512vnni,avx512vl") +#define __DISABLE_AVX512VNNIVL__ +#endif /* __AVX512VNNIVL__ */ + +#define _mm256_dpbusd_epi32(A, B, C) \ + ((__m256i) __builtin_ia32_vpdpbusd_v8si ((__v8si) (A), \ + (__v8si) (B), \ + (__v8si) (C))) + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_dpbusd_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpbusd_v8si_mask ((__v8si)__A, (__v8si) __C, + (__v8si) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_dpbusd_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpbusd_v8si_maskz ((__v8si)__B, + (__v8si) __C, (__v8si) __D, (__mmask8)__A); +} + +#define _mm_dpbusd_epi32(A, B, C) \ + ((__m128i) __builtin_ia32_vpdpbusd_v4si ((__v4si) (A), \ + (__v4si) (B), \ + (__v4si) (C))) + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_dpbusd_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpbusd_v4si_mask ((__v4si)__A, (__v4si) __C, + (__v4si) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_dpbusd_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpbusd_v4si_maskz ((__v4si)__B, + (__v4si) __C, (__v4si) __D, (__mmask8)__A); +} + +#define _mm256_dpbusds_epi32(A, B, C) \ + ((__m256i) __builtin_ia32_vpdpbusds_v8si ((__v8si) (A), \ + (__v8si) (B), \ + (__v8si) (C))) + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_dpbusds_epi32 (__m256i __A, 
__mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpbusds_v8si_mask ((__v8si)__A, + (__v8si) __C, (__v8si) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_dpbusds_epi32 (__mmask8 __A, __m256i __B, __m256i __C, + __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpbusds_v8si_maskz ((__v8si)__B, + (__v8si) __C, (__v8si) __D, (__mmask8)__A); +} + +#define _mm_dpbusds_epi32(A, B, C) \ + ((__m128i) __builtin_ia32_vpdpbusds_v4si ((__v4si) (A), \ + (__v4si) (B), \ + (__v4si) (C))) + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_dpbusds_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpbusds_v4si_mask ((__v4si)__A, + (__v4si) __C, (__v4si) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_dpbusds_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpbusds_v4si_maskz ((__v4si)__B, + (__v4si) __C, (__v4si) __D, (__mmask8)__A); +} + +#define _mm256_dpwssd_epi32(A, B, C) \ + ((__m256i) __builtin_ia32_vpdpwssd_v8si ((__v8si) (A), \ + (__v8si) (B), \ + (__v8si) (C))) + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_dpwssd_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpwssd_v8si_mask ((__v8si)__A, (__v8si) __C, + (__v8si) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_dpwssd_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpwssd_v8si_maskz ((__v8si)__B, + (__v8si) __C, (__v8si) __D, (__mmask8)__A); +} + +#define _mm_dpwssd_epi32(A, B, C) \ + ((__m128i) __builtin_ia32_vpdpwssd_v4si ((__v4si) (A), 
\ + (__v4si) (B), \ + (__v4si) (C))) + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_dpwssd_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpwssd_v4si_mask ((__v4si)__A, (__v4si) __C, + (__v4si) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_dpwssd_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpwssd_v4si_maskz ((__v4si)__B, + (__v4si) __C, (__v4si) __D, (__mmask8)__A); +} + +#define _mm256_dpwssds_epi32(A, B, C) \ + ((__m256i) __builtin_ia32_vpdpwssds_v8si ((__v8si) (A), \ + (__v8si) (B), \ + (__v8si) (C))) + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_dpwssds_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpwssds_v8si_mask ((__v8si)__A, + (__v8si) __C, (__v8si) __D, (__mmask8)__B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_dpwssds_epi32 (__mmask8 __A, __m256i __B, __m256i __C, + __m256i __D) +{ + return (__m256i)__builtin_ia32_vpdpwssds_v8si_maskz ((__v8si)__B, + (__v8si) __C, (__v8si) __D, (__mmask8)__A); +} + +#define _mm_dpwssds_epi32(A, B, C) \ + ((__m128i) __builtin_ia32_vpdpwssds_v4si ((__v4si) (A), \ + (__v4si) (B), \ + (__v4si) (C))) + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_dpwssds_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D) +{ + return (__m128i)__builtin_ia32_vpdpwssds_v4si_mask ((__v4si)__A, + (__v4si) __C, (__v4si) __D, (__mmask8)__B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_dpwssds_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D) +{ + return 
(__m128i)__builtin_ia32_vpdpwssds_v4si_maskz ((__v4si)__B, + (__v4si) __C, (__v4si) __D, (__mmask8)__A); +} +#ifdef __DISABLE_AVX512VNNIVL__ +#undef __DISABLE_AVX512VNNIVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VNNIVL__ */ +#endif /* __DISABLE_AVX512VNNIVL__ */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vp2intersectintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vp2intersectintrin.h new file mode 100644 index 0000000..16b4991 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vp2intersectintrin.h @@ -0,0 +1,58 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512VP2INTERSECTINTRIN_H_INCLUDED +#define _AVX512VP2INTERSECTINTRIN_H_INCLUDED + +#if !defined(__AVX512VP2INTERSECT__) +#pragma GCC push_options +#pragma GCC target("avx512vp2intersect") +#define __DISABLE_AVX512VP2INTERSECT__ +#endif /* __AVX512VP2INTERSECT__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_2intersect_epi32 (__m512i __A, __m512i __B, __mmask16 *__U, + __mmask16 *__M) +{ + __builtin_ia32_2intersectd512 (__U, __M, (__v16si) __A, (__v16si) __B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_2intersect_epi64 (__m512i __A, __m512i __B, __mmask8 *__U, + __mmask8 *__M) +{ + __builtin_ia32_2intersectq512 (__U, __M, (__v8di) __A, (__v8di) __B); +} + +#ifdef __DISABLE_AVX512VP2INTERSECT__ +#undef __DISABLE_AVX512VP2INTERSECT__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VP2INTERSECT__ */ + +#endif /* _AVX512VP2INTERSECTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vp2intersectvlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vp2intersectvlintrin.h new file mode 100644 index 0000000..1eb7de6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vp2intersectvlintrin.h @@ -0,0 +1,72 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VP2INTERSECTVLINTRIN_H_INCLUDED +#define _AVX512VP2INTERSECTVLINTRIN_H_INCLUDED + +#if !defined(__AVX512VP2INTERSECT__) || !defined(__AVX512VL__) +#pragma GCC push_options +#pragma GCC target("avx512vp2intersect,avx512vl") +#define __DISABLE_AVX512VP2INTERSECTVL__ +#endif /* __AVX512VP2INTERSECTVL__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_2intersect_epi32 (__m128i __A, __m128i __B, __mmask8 *__U, __mmask8 *__M) +{ + __builtin_ia32_2intersectd128 (__U, __M, (__v4si) __A, (__v4si) __B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_2intersect_epi32 (__m256i __A, __m256i __B, __mmask8 *__U, + __mmask8 *__M) +{ + __builtin_ia32_2intersectd256 (__U, __M, (__v8si) __A, (__v8si) __B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_2intersect_epi64 (__m128i __A, __m128i __B, __mmask8 *__U, __mmask8 *__M) +{ + __builtin_ia32_2intersectq128 (__U, __M, (__v2di) __A, (__v2di) __B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_2intersect_epi64 (__m256i __A, __m256i __B, __mmask8 *__U, + __mmask8 *__M) +{ + __builtin_ia32_2intersectq256 (__U, __M, (__v4di) __A, (__v4di) __B); +} + +#ifdef __DISABLE_AVX512VP2INTERSECTVL__ +#undef __DISABLE_AVX512VP2INTERSECTVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VP2INTERSECTVL__ */ + +#endif 
/* _AVX512VP2INTERSECTVLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vpopcntdqintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vpopcntdqintrin.h new file mode 100644 index 0000000..cdf58cc --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vpopcntdqintrin.h @@ -0,0 +1,94 @@ +/* Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512VPOPCNTDQINTRIN_H_INCLUDED +#define _AVX512VPOPCNTDQINTRIN_H_INCLUDED + +#ifndef __AVX512VPOPCNTDQ__ +#pragma GCC push_options +#pragma GCC target("avx512vpopcntdq") +#define __DISABLE_AVX512VPOPCNTDQ__ +#endif /* __AVX512VPOPCNTDQ__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_popcnt_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountd_v16si ((__v16si) __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_popcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountd_v16si_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_popcnt_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountd_v16si_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_popcnt_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountq_v8di ((__v8di) __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_popcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountq_v8di_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_popcnt_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcountq_v8di_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +#ifdef __DISABLE_AVX512VPOPCNTDQ__ +#undef __DISABLE_AVX512VPOPCNTDQ__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VPOPCNTDQ__ */ + +#endif /* _AVX512VPOPCNTDQINTRIN_H_INCLUDED */ diff --git 
a/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vpopcntdqvlintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vpopcntdqvlintrin.h new file mode 100644 index 0000000..1316217 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avx512vpopcntdqvlintrin.h @@ -0,0 +1,146 @@ +/* Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _AVX512VPOPCNTDQVLINTRIN_H_INCLUDED +#define _AVX512VPOPCNTDQVLINTRIN_H_INCLUDED + +#if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512VL__) +#pragma GCC push_options +#pragma GCC target("avx512vpopcntdq,avx512vl") +#define __DISABLE_AVX512VPOPCNTDQVL__ +#endif /* __AVX512VPOPCNTDQVL__ */ + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountd_v4si ((__v4si) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_popcnt_epi32 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountd_v4si_mask ((__v4si) __A, + (__v4si) __W, + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_popcnt_epi32 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountd_v4si_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_popcnt_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountd_v8si ((__v8si) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_popcnt_epi32 (__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountd_v8si_mask ((__v8si) __A, + (__v8si) __W, + (__mmask16) __U); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_popcnt_epi32 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountd_v8si_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_epi64 (__m128i __A) +{ + return (__m128i) 
__builtin_ia32_vpopcountq_v2di ((__v2di) __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_popcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountq_v2di_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_popcnt_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcountq_v2di_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_popcnt_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountq_v4di ((__v4di) __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_popcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountq_v4di_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_popcnt_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcountq_v4di_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +#ifdef __DISABLE_AVX512VPOPCNTDQVL__ +#undef __DISABLE_AVX512VPOPCNTDQVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX512VPOPCNTDQVL__ */ + +#endif /* _AVX512VPOPCNTDQVLINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avxintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avxintrin.h new file mode 100644 index 0000000..c76d600 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avxintrin.h @@ -0,0 +1,1598 @@ +/* Copyright (C) 2008-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 11.0. */ + +#ifndef _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _AVXINTRIN_H_INCLUDED +#define _AVXINTRIN_H_INCLUDED + +#ifndef __AVX__ +#pragma GCC push_options +#pragma GCC target("avx") +#define __DISABLE_AVX__ +#endif /* __AVX__ */ + +/* Internal data types for implementing the intrinsics. 
*/ +typedef double __v4df __attribute__ ((__vector_size__ (32))); +typedef float __v8sf __attribute__ ((__vector_size__ (32))); +typedef long long __v4di __attribute__ ((__vector_size__ (32))); +typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); +typedef int __v8si __attribute__ ((__vector_size__ (32))); +typedef unsigned int __v8su __attribute__ ((__vector_size__ (32))); +typedef short __v16hi __attribute__ ((__vector_size__ (32))); +typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32))); +typedef char __v32qi __attribute__ ((__vector_size__ (32))); +typedef signed char __v32qs __attribute__ ((__vector_size__ (32))); +typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32))); + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m256 __attribute__ ((__vector_size__ (32), + __may_alias__)); +typedef long long __m256i __attribute__ ((__vector_size__ (32), + __may_alias__)); +typedef double __m256d __attribute__ ((__vector_size__ (32), + __may_alias__)); + +/* Unaligned version of the same types. */ +typedef float __m256_u __attribute__ ((__vector_size__ (32), + __may_alias__, + __aligned__ (1))); +typedef long long __m256i_u __attribute__ ((__vector_size__ (32), + __may_alias__, + __aligned__ (1))); +typedef double __m256d_u __attribute__ ((__vector_size__ (32), + __may_alias__, + __aligned__ (1))); + +/* Compare predicates for scalar and packed compare intrinsics. 
*/ + +/* Equal (ordered, non-signaling) */ +#define _CMP_EQ_OQ 0x00 +/* Less-than (ordered, signaling) */ +#define _CMP_LT_OS 0x01 +/* Less-than-or-equal (ordered, signaling) */ +#define _CMP_LE_OS 0x02 +/* Unordered (non-signaling) */ +#define _CMP_UNORD_Q 0x03 +/* Not-equal (unordered, non-signaling) */ +#define _CMP_NEQ_UQ 0x04 +/* Not-less-than (unordered, signaling) */ +#define _CMP_NLT_US 0x05 +/* Not-less-than-or-equal (unordered, signaling) */ +#define _CMP_NLE_US 0x06 +/* Ordered (nonsignaling) */ +#define _CMP_ORD_Q 0x07 +/* Equal (unordered, non-signaling) */ +#define _CMP_EQ_UQ 0x08 +/* Not-greater-than-or-equal (unordered, signaling) */ +#define _CMP_NGE_US 0x09 +/* Not-greater-than (unordered, signaling) */ +#define _CMP_NGT_US 0x0a +/* False (ordered, non-signaling) */ +#define _CMP_FALSE_OQ 0x0b +/* Not-equal (ordered, non-signaling) */ +#define _CMP_NEQ_OQ 0x0c +/* Greater-than-or-equal (ordered, signaling) */ +#define _CMP_GE_OS 0x0d +/* Greater-than (ordered, signaling) */ +#define _CMP_GT_OS 0x0e +/* True (unordered, non-signaling) */ +#define _CMP_TRUE_UQ 0x0f +/* Equal (ordered, signaling) */ +#define _CMP_EQ_OS 0x10 +/* Less-than (ordered, non-signaling) */ +#define _CMP_LT_OQ 0x11 +/* Less-than-or-equal (ordered, non-signaling) */ +#define _CMP_LE_OQ 0x12 +/* Unordered (signaling) */ +#define _CMP_UNORD_S 0x13 +/* Not-equal (unordered, signaling) */ +#define _CMP_NEQ_US 0x14 +/* Not-less-than (unordered, non-signaling) */ +#define _CMP_NLT_UQ 0x15 +/* Not-less-than-or-equal (unordered, non-signaling) */ +#define _CMP_NLE_UQ 0x16 +/* Ordered (signaling) */ +#define _CMP_ORD_S 0x17 +/* Equal (unordered, signaling) */ +#define _CMP_EQ_US 0x18 +/* Not-greater-than-or-equal (unordered, non-signaling) */ +#define _CMP_NGE_UQ 0x19 +/* Not-greater-than (unordered, non-signaling) */ +#define _CMP_NGT_UQ 0x1a +/* False (ordered, signaling) */ +#define _CMP_FALSE_OS 0x1b +/* Not-equal (ordered, signaling) */ +#define _CMP_NEQ_OS 0x1c +/* 
Greater-than-or-equal (ordered, non-signaling) */ +#define _CMP_GE_OQ 0x1d +/* Greater-than (ordered, non-signaling) */ +#define _CMP_GT_OQ 0x1e +/* True (unordered, signaling) */ +#define _CMP_TRUE_US 0x1f + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_pd (__m256d __A, __m256d __B) +{ + return (__m256d) ((__v4df)__A + (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_add_ps (__m256 __A, __m256 __B) +{ + return (__m256) ((__v8sf)__A + (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_addsub_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_addsubpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_addsub_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_addsubps256 ((__v8sf)__A, (__v8sf)__B); +} + + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_and_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_andpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_and_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_andnot_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_andnpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_andnot_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_andnps256 ((__v8sf)__A, (__v8sf)__B); +} + +/* Double/single precision floating point blend instructions - select + data from 2 sources using constant/variable mask. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blend_pd (__m256d __X, __m256d __Y, const int __M) +{ + return (__m256d) __builtin_ia32_blendpd256 ((__v4df)__X, + (__v4df)__Y, + __M); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blend_ps (__m256 __X, __m256 __Y, const int __M) +{ + return (__m256) __builtin_ia32_blendps256 ((__v8sf)__X, + (__v8sf)__Y, + __M); +} +#else +#define _mm256_blend_pd(X, Y, M) \ + ((__m256d) __builtin_ia32_blendpd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(M))) + +#define _mm256_blend_ps(X, Y, M) \ + ((__m256) __builtin_ia32_blendps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(M))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blendv_pd (__m256d __X, __m256d __Y, __m256d __M) +{ + return (__m256d) __builtin_ia32_blendvpd256 ((__v4df)__X, + (__v4df)__Y, + (__v4df)__M); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_blendv_ps (__m256 __X, __m256 __Y, __m256 __M) +{ + return (__m256) __builtin_ia32_blendvps256 ((__v8sf)__X, + (__v8sf)__Y, + (__v8sf)__M); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_div_pd (__m256d __A, __m256d __B) +{ + return (__m256d) ((__v4df)__A / (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_div_ps (__m256 __A, __m256 __B) +{ + return (__m256) ((__v8sf)__A / (__v8sf)__B); +} + +/* Dot product instructions with mask-defined summing and zeroing parts + of result. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dp_ps (__m256 __X, __m256 __Y, const int __M) +{ + return (__m256) __builtin_ia32_dpps256 ((__v8sf)__X, + (__v8sf)__Y, + __M); +} +#else +#define _mm256_dp_ps(X, Y, M) \ + ((__m256) __builtin_ia32_dpps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(M))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadd_pd (__m256d __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_haddpd256 ((__v4df)__X, (__v4df)__Y); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hadd_ps (__m256 __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_haddps256 ((__v8sf)__X, (__v8sf)__Y); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsub_pd (__m256d __X, __m256d __Y) +{ + return (__m256d) __builtin_ia32_hsubpd256 ((__v4df)__X, (__v4df)__Y); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_hsub_ps (__m256 __X, __m256 __Y) +{ + return (__m256) __builtin_ia32_hsubps256 ((__v8sf)__X, (__v8sf)__Y); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_maxpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_max_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_maxps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_minpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_min_ps (__m256 
__A, __m256 __B) +{ + return (__m256) __builtin_ia32_minps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_pd (__m256d __A, __m256d __B) +{ + return (__m256d) ((__v4df)__A * (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mul_ps (__m256 __A, __m256 __B) +{ + return (__m256) ((__v8sf)__A * (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_orpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_or_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_orps256 ((__v8sf)__A, (__v8sf)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_pd (__m256d __A, __m256d __B, const int __mask) +{ + return (__m256d) __builtin_ia32_shufpd256 ((__v4df)__A, (__v4df)__B, + __mask); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_shuffle_ps (__m256 __A, __m256 __B, const int __mask) +{ + return (__m256) __builtin_ia32_shufps256 ((__v8sf)__A, (__v8sf)__B, + __mask); +} +#else +#define _mm256_shuffle_pd(A, B, N) \ + ((__m256d)__builtin_ia32_shufpd256 ((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(N))) + +#define _mm256_shuffle_ps(A, B, N) \ + ((__m256) __builtin_ia32_shufps256 ((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(N))) +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_pd (__m256d __A, __m256d __B) +{ + return (__m256d) ((__v4df)__A - (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sub_ps (__m256 __A, __m256 
__B) +{ + return (__m256) ((__v8sf)__A - (__v8sf)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_xorpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_xor_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_xorps256 ((__v8sf)__A, (__v8sf)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_pd (__m128d __X, __m128d __Y, const int __P) +{ + return (__m128d) __builtin_ia32_cmppd ((__v2df)__X, (__v2df)__Y, __P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ps (__m128 __X, __m128 __Y, const int __P) +{ + return (__m128) __builtin_ia32_cmpps ((__v4sf)__X, (__v4sf)__Y, __P); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_pd (__m256d __X, __m256d __Y, const int __P) +{ + return (__m256d) __builtin_ia32_cmppd256 ((__v4df)__X, (__v4df)__Y, + __P); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmp_ps (__m256 __X, __m256 __Y, const int __P) +{ + return (__m256) __builtin_ia32_cmpps256 ((__v8sf)__X, (__v8sf)__Y, + __P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_sd (__m128d __X, __m128d __Y, const int __P) +{ + return (__m128d) __builtin_ia32_cmpsd ((__v2df)__X, (__v2df)__Y, __P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmp_ss (__m128 __X, __m128 __Y, const int __P) +{ + return (__m128) __builtin_ia32_cmpss ((__v4sf)__X, (__v4sf)__Y, __P); +} +#else +#define _mm_cmp_pd(X, Y, P) \ + ((__m128d) __builtin_ia32_cmppd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P))) 
+ +#define _mm_cmp_ps(X, Y, P) \ + ((__m128) __builtin_ia32_cmpps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P))) + +#define _mm256_cmp_pd(X, Y, P) \ + ((__m256d) __builtin_ia32_cmppd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), (int)(P))) + +#define _mm256_cmp_ps(X, Y, P) \ + ((__m256) __builtin_ia32_cmpps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), (int)(P))) + +#define _mm_cmp_sd(X, Y, P) \ + ((__m128d) __builtin_ia32_cmpsd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P))) + +#define _mm_cmp_ss(X, Y, P) \ + ((__m128) __builtin_ia32_cmpss ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P))) +#endif + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsi256_si32 (__m256i __A) +{ + __v8si __B = (__v8si) __A; + return __B[0]; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_pd (__m128i __A) +{ + return (__m256d)__builtin_ia32_cvtdq2pd256 ((__v4si) __A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtepi32_ps (__m256i __A) +{ + return (__m256)__builtin_ia32_cvtdq2ps256 ((__v8si) __A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_ps (__m256d __A) +{ + return (__m128)__builtin_ia32_cvtpd2ps256 ((__v4df) __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_epi32 (__m256 __A) +{ + return (__m256i)__builtin_ia32_cvtps2dq256 ((__v8sf) __A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_pd (__m128 __A) +{ + return (__m256d)__builtin_ia32_cvtps2pd256 ((__v4sf) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttpd_epi32 (__m256d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq256 ((__v4df) __A); +} + +extern 
__inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtpd_epi32 (__m256d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq256 ((__v4df) __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvttps_epi32 (__m256 __A) +{ + return (__m256i)__builtin_ia32_cvttps2dq256 ((__v8sf) __A); +} + +extern __inline double +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtsd_f64 (__m256d __A) +{ + return __A[0]; +} + +extern __inline float +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtss_f32 (__m256 __A) +{ + return __A[0]; +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf128_pd (__m256d __X, const int __N) +{ + return (__m128d) __builtin_ia32_vextractf128_pd256 ((__v4df)__X, __N); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf128_ps (__m256 __X, const int __N) +{ + return (__m128) __builtin_ia32_vextractf128_ps256 ((__v8sf)__X, __N); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extractf128_si256 (__m256i __X, const int __N) +{ + return (__m128i) __builtin_ia32_vextractf128_si256 ((__v8si)__X, __N); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi32 (__m256i __X, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 2); + return _mm_extract_epi32 (__Y, __N % 4); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi16 (__m256i __X, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 3); + return _mm_extract_epi16 (__Y, __N % 8); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi8 (__m256i __X, int 
const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 4); + return _mm_extract_epi8 (__Y, __N % 16); +} + +#ifdef __x86_64__ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_extract_epi64 (__m256i __X, const int __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 1); + return _mm_extract_epi64 (__Y, __N % 2); +} +#endif +#else +#define _mm256_extractf128_pd(X, N) \ + ((__m128d) __builtin_ia32_vextractf128_pd256 ((__v4df)(__m256d)(X), \ + (int)(N))) + +#define _mm256_extractf128_ps(X, N) \ + ((__m128) __builtin_ia32_vextractf128_ps256 ((__v8sf)(__m256)(X), \ + (int)(N))) + +#define _mm256_extractf128_si256(X, N) \ + ((__m128i) __builtin_ia32_vextractf128_si256 ((__v8si)(__m256i)(X), \ + (int)(N))) + +#define _mm256_extract_epi32(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 2); \ + _mm_extract_epi32 (__Y, (N) % 4); \ + })) + +#define _mm256_extract_epi16(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 3); \ + _mm_extract_epi16 (__Y, (N) % 8); \ + })) + +#define _mm256_extract_epi8(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 4); \ + _mm_extract_epi8 (__Y, (N) % 16); \ + })) + +#ifdef __x86_64__ +#define _mm256_extract_epi64(X, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 1); \ + _mm_extract_epi64 (__Y, (N) % 2); \ + })) +#endif +#endif + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zeroall (void) +{ + __builtin_ia32_vzeroall (); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zeroupper (void) +{ + __builtin_ia32_vzeroupper (); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutevar_pd (__m128d __A, __m128i __C) +{ + return (__m128d) __builtin_ia32_vpermilvarpd 
((__v2df)__A, + (__v2di)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutevar_pd (__m256d __A, __m256i __C) +{ + return (__m256d) __builtin_ia32_vpermilvarpd256 ((__v4df)__A, + (__v4di)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permutevar_ps (__m128 __A, __m128i __C) +{ + return (__m128) __builtin_ia32_vpermilvarps ((__v4sf)__A, + (__v4si)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permutevar_ps (__m256 __A, __m256i __C) +{ + return (__m256) __builtin_ia32_vpermilvarps256 ((__v8sf)__A, + (__v8si)__C); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute_pd (__m128d __X, const int __C) +{ + return (__m128d) __builtin_ia32_vpermilpd ((__v2df)__X, __C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute_pd (__m256d __X, const int __C) +{ + return (__m256d) __builtin_ia32_vpermilpd256 ((__v4df)__X, __C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute_ps (__m128 __X, const int __C) +{ + return (__m128) __builtin_ia32_vpermilps ((__v4sf)__X, __C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute_ps (__m256 __X, const int __C) +{ + return (__m256) __builtin_ia32_vpermilps256 ((__v8sf)__X, __C); +} +#else +#define _mm_permute_pd(X, C) \ + ((__m128d) __builtin_ia32_vpermilpd ((__v2df)(__m128d)(X), (int)(C))) + +#define _mm256_permute_pd(X, C) \ + ((__m256d) __builtin_ia32_vpermilpd256 ((__v4df)(__m256d)(X), (int)(C))) + +#define _mm_permute_ps(X, C) \ + ((__m128) __builtin_ia32_vpermilps ((__v4sf)(__m128)(X), (int)(C))) + +#define _mm256_permute_ps(X, C) \ + ((__m256) __builtin_ia32_vpermilps256 ((__v8sf)(__m256)(X), 
(int)(C))) +#endif + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2f128_pd (__m256d __X, __m256d __Y, const int __C) +{ + return (__m256d) __builtin_ia32_vperm2f128_pd256 ((__v4df)__X, + (__v4df)__Y, + __C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2f128_ps (__m256 __X, __m256 __Y, const int __C) +{ + return (__m256) __builtin_ia32_vperm2f128_ps256 ((__v8sf)__X, + (__v8sf)__Y, + __C); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2f128_si256 (__m256i __X, __m256i __Y, const int __C) +{ + return (__m256i) __builtin_ia32_vperm2f128_si256 ((__v8si)__X, + (__v8si)__Y, + __C); +} +#else +#define _mm256_permute2f128_pd(X, Y, C) \ + ((__m256d) __builtin_ia32_vperm2f128_pd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (int)(C))) + +#define _mm256_permute2f128_ps(X, Y, C) \ + ((__m256) __builtin_ia32_vperm2f128_ps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (int)(C))) + +#define _mm256_permute2f128_si256(X, Y, C) \ + ((__m256i) __builtin_ia32_vperm2f128_si256 ((__v8si)(__m256i)(X), \ + (__v8si)(__m256i)(Y), \ + (int)(C))) +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_broadcast_ss (float const *__X) +{ + return (__m128) __builtin_ia32_vbroadcastss (__X); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_sd (double const *__X) +{ + return (__m256d) __builtin_ia32_vbroadcastsd256 (__X); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_ss (float const *__X) +{ + return (__m256) __builtin_ia32_vbroadcastss256 (__X); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_pd (__m128d const *__X) +{ + 
return (__m256d) __builtin_ia32_vbroadcastf128_pd256 (__X); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_broadcast_ps (__m128 const *__X) +{ + return (__m256) __builtin_ia32_vbroadcastf128_ps256 (__X); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf128_pd (__m256d __X, __m128d __Y, const int __O) +{ + return (__m256d) __builtin_ia32_vinsertf128_pd256 ((__v4df)__X, + (__v2df)__Y, + __O); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf128_ps (__m256 __X, __m128 __Y, const int __O) +{ + return (__m256) __builtin_ia32_vinsertf128_ps256 ((__v8sf)__X, + (__v4sf)__Y, + __O); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insertf128_si256 (__m256i __X, __m128i __Y, const int __O) +{ + return (__m256i) __builtin_ia32_vinsertf128_si256 ((__v8si)__X, + (__v4si)__Y, + __O); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi32 (__m256i __X, int __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 2); + __Y = _mm_insert_epi32 (__Y, __D, __N % 4); + return _mm256_insertf128_si256 (__X, __Y, __N >> 2); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi16 (__m256i __X, int __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 3); + __Y = _mm_insert_epi16 (__Y, __D, __N % 8); + return _mm256_insertf128_si256 (__X, __Y, __N >> 3); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi8 (__m256i __X, int __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 4); + __Y = _mm_insert_epi8 (__Y, __D, __N % 16); + return _mm256_insertf128_si256 (__X, __Y, __N >> 4); +} + 
+#ifdef __x86_64__ +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_insert_epi64 (__m256i __X, long long __D, int const __N) +{ + __m128i __Y = _mm256_extractf128_si256 (__X, __N >> 1); + __Y = _mm_insert_epi64 (__Y, __D, __N % 2); + return _mm256_insertf128_si256 (__X, __Y, __N >> 1); +} +#endif +#else +#define _mm256_insertf128_pd(X, Y, O) \ + ((__m256d) __builtin_ia32_vinsertf128_pd256 ((__v4df)(__m256d)(X), \ + (__v2df)(__m128d)(Y), \ + (int)(O))) + +#define _mm256_insertf128_ps(X, Y, O) \ + ((__m256) __builtin_ia32_vinsertf128_ps256 ((__v8sf)(__m256)(X), \ + (__v4sf)(__m128)(Y), \ + (int)(O))) + +#define _mm256_insertf128_si256(X, Y, O) \ + ((__m256i) __builtin_ia32_vinsertf128_si256 ((__v8si)(__m256i)(X), \ + (__v4si)(__m128i)(Y), \ + (int)(O))) + +#define _mm256_insert_epi32(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 2); \ + __Y = _mm_insert_epi32 (__Y, (D), (N) % 4); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 2); \ + })) + +#define _mm256_insert_epi16(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 3); \ + __Y = _mm_insert_epi16 (__Y, (D), (N) % 8); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 3); \ + })) + +#define _mm256_insert_epi8(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 4); \ + __Y = _mm_insert_epi8 (__Y, (D), (N) % 16); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 4); \ + })) + +#ifdef __x86_64__ +#define _mm256_insert_epi64(X, D, N) \ + (__extension__ \ + ({ \ + __m128i __Y = _mm256_extractf128_si256 ((X), (N) >> 1); \ + __Y = _mm_insert_epi64 (__Y, (D), (N) % 2); \ + _mm256_insertf128_si256 ((X), __Y, (N) >> 1); \ + })) +#endif +#endif + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_pd (double const *__P) +{ + return *(__m256d *)__P; +} + +extern __inline void __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_store_pd (double *__P, __m256d __A) +{ + *(__m256d *)__P = __A; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_ps (float const *__P) +{ + return *(__m256 *)__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_ps (float *__P, __m256 __A) +{ + *(__m256 *)__P = __A; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_pd (double const *__P) +{ + return *(__m256d_u *)__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_pd (double *__P, __m256d __A) +{ + *(__m256d_u *)__P = __A; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_ps (float const *__P) +{ + return *(__m256_u *)__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_ps (float *__P, __m256 __A) +{ + *(__m256_u *)__P = __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_load_si256 (__m256i const *__P) +{ + return *__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_store_si256 (__m256i *__P, __m256i __A) +{ + *__P = __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu_si256 (__m256i_u const *__P) +{ + return *__P; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu_si256 (__m256i_u *__P, __m256i __A) +{ + *__P = __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskload_pd (double const *__P, __m128i __M) +{ + return (__m128d) __builtin_ia32_maskloadpd ((const __v2df *)__P, + (__v2di)__M); +} + +extern __inline void 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskstore_pd (double *__P, __m128i __M, __m128d __A) +{ + __builtin_ia32_maskstorepd ((__v2df *)__P, (__v2di)__M, (__v2df)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskload_pd (double const *__P, __m256i __M) +{ + return (__m256d) __builtin_ia32_maskloadpd256 ((const __v4df *)__P, + (__v4di)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskstore_pd (double *__P, __m256i __M, __m256d __A) +{ + __builtin_ia32_maskstorepd256 ((__v4df *)__P, (__v4di)__M, (__v4df)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskload_ps (float const *__P, __m128i __M) +{ + return (__m128) __builtin_ia32_maskloadps ((const __v4sf *)__P, + (__v4si)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskstore_ps (float *__P, __m128i __M, __m128 __A) +{ + __builtin_ia32_maskstoreps ((__v4sf *)__P, (__v4si)__M, (__v4sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskload_ps (float const *__P, __m256i __M) +{ + return (__m256) __builtin_ia32_maskloadps256 ((const __v8sf *)__P, + (__v8si)__M); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskstore_ps (float *__P, __m256i __M, __m256 __A) +{ + __builtin_ia32_maskstoreps256 ((__v8sf *)__P, (__v8si)__M, (__v8sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movehdup_ps (__m256 __X) +{ + return (__m256) __builtin_ia32_movshdup256 ((__v8sf)__X); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_moveldup_ps (__m256 __X) +{ + return (__m256) __builtin_ia32_movsldup256 ((__v8sf)__X); +} + +extern __inline 
__m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movedup_pd (__m256d __X) +{ + return (__m256d) __builtin_ia32_movddup256 ((__v4df)__X); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_lddqu_si256 (__m256i const *__P) +{ + return (__m256i) __builtin_ia32_lddqu256 ((char const *)__P); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_si256 (__m256i *__A, __m256i __B) +{ + __builtin_ia32_movntdq256 ((__v4di *)__A, (__v4di)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_pd (double *__A, __m256d __B) +{ + __builtin_ia32_movntpd256 (__A, (__v4df)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_stream_ps (float *__P, __m256 __A) +{ + __builtin_ia32_movntps256 (__P, (__v8sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rcp_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rcpps256 ((__v8sf)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_rsqrt_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rsqrtps256 ((__v8sf)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sqrt_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_sqrtpd256 ((__v4df)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_sqrt_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_sqrtps256 ((__v8sf)__A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_round_pd (__m256d __V, const int __M) +{ + return (__m256d) __builtin_ia32_roundpd256 ((__v4df)__V, __M); +} + +extern __inline __m256 __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_round_ps (__m256 __V, const int __M) +{ + return (__m256) __builtin_ia32_roundps256 ((__v8sf)__V, __M); +} +#else +#define _mm256_round_pd(V, M) \ + ((__m256d) __builtin_ia32_roundpd256 ((__v4df)(__m256d)(V), (int)(M))) + +#define _mm256_round_ps(V, M) \ + ((__m256) __builtin_ia32_roundps256 ((__v8sf)(__m256)(V), (int)(M))) +#endif + +#define _mm256_ceil_pd(V) _mm256_round_pd ((V), _MM_FROUND_CEIL) +#define _mm256_floor_pd(V) _mm256_round_pd ((V), _MM_FROUND_FLOOR) +#define _mm256_ceil_ps(V) _mm256_round_ps ((V), _MM_FROUND_CEIL) +#define _mm256_floor_ps(V) _mm256_round_ps ((V), _MM_FROUND_FLOOR) + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_unpckhpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_pd (__m256d __A, __m256d __B) +{ + return (__m256d) __builtin_ia32_unpcklpd256 ((__v4df)__A, (__v4df)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpackhi_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_unpckhps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_unpacklo_ps (__m256 __A, __m256 __B) +{ + return (__m256) __builtin_ia32_unpcklps256 ((__v8sf)__A, (__v8sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testz_pd (__m128d __M, __m128d __V) +{ + return __builtin_ia32_vtestzpd ((__v2df)__M, (__v2df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testc_pd (__m128d __M, __m128d __V) +{ + return __builtin_ia32_vtestcpd ((__v2df)__M, (__v2df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_testnzc_pd (__m128d __M, __m128d __V) +{ + return __builtin_ia32_vtestnzcpd ((__v2df)__M, (__v2df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testz_ps (__m128 __M, __m128 __V) +{ + return __builtin_ia32_vtestzps ((__v4sf)__M, (__v4sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testc_ps (__m128 __M, __m128 __V) +{ + return __builtin_ia32_vtestcps ((__v4sf)__M, (__v4sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testnzc_ps (__m128 __M, __m128 __V) +{ + return __builtin_ia32_vtestnzcps ((__v4sf)__M, (__v4sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testz_pd (__m256d __M, __m256d __V) +{ + return __builtin_ia32_vtestzpd256 ((__v4df)__M, (__v4df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testc_pd (__m256d __M, __m256d __V) +{ + return __builtin_ia32_vtestcpd256 ((__v4df)__M, (__v4df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testnzc_pd (__m256d __M, __m256d __V) +{ + return __builtin_ia32_vtestnzcpd256 ((__v4df)__M, (__v4df)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testz_ps (__m256 __M, __m256 __V) +{ + return __builtin_ia32_vtestzps256 ((__v8sf)__M, (__v8sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testc_ps (__m256 __M, __m256 __V) +{ + return __builtin_ia32_vtestcps256 ((__v8sf)__M, (__v8sf)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testnzc_ps (__m256 __M, __m256 __V) +{ + return __builtin_ia32_vtestnzcps256 ((__v8sf)__M, (__v8sf)__V); +} + +extern __inline int 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testz_si256 (__m256i __M, __m256i __V) +{ + return __builtin_ia32_ptestz256 ((__v4di)__M, (__v4di)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testc_si256 (__m256i __M, __m256i __V) +{ + return __builtin_ia32_ptestc256 ((__v4di)__M, (__v4di)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_testnzc_si256 (__m256i __M, __m256i __V) +{ + return __builtin_ia32_ptestnzc256 ((__v4di)__M, (__v4di)__V); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movemask_pd (__m256d __A) +{ + return __builtin_ia32_movmskpd256 ((__v4df)__A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_movemask_ps (__m256 __A) +{ + return __builtin_ia32_movmskps256 ((__v8sf)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_undefined_pd (void) +{ + __m256d __Y = __Y; + return __Y; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_undefined_ps (void) +{ + __m256 __Y = __Y; + return __Y; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_undefined_si256 (void) +{ + __m256i __Y = __Y; + return __Y; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_pd (void) +{ + return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 }; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_ps (void) +{ + return __extension__ (__m256){ 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0 }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setzero_si256 (void) +{ + return __extension__ (__m256i)(__v4di){ 
0, 0, 0, 0 }; +} + +/* Create the vector [A B C D]. */ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_pd (double __A, double __B, double __C, double __D) +{ + return __extension__ (__m256d){ __D, __C, __B, __A }; +} + +/* Create the vector [A B C D E F G H]. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H) +{ + return __extension__ (__m256){ __H, __G, __F, __E, + __D, __C, __B, __A }; +} + +/* Create the vector [A B C D E F G H]. */ +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H) +{ + return __extension__ (__m256i)(__v8si){ __H, __G, __F, __E, + __D, __C, __B, __A }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi16 (short __q15, short __q14, short __q13, short __q12, + short __q11, short __q10, short __q09, short __q08, + short __q07, short __q06, short __q05, short __q04, + short __q03, short __q02, short __q01, short __q00) +{ + return __extension__ (__m256i)(__v16hi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi8 (char __q31, char __q30, char __q29, char __q28, + char __q27, char __q26, char __q25, char __q24, + char __q23, char __q22, char __q21, char __q20, + char __q19, char __q18, char __q17, char __q16, + char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m256i)(__v32qi){ + __q00, __q01, __q02, 
__q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15, + __q16, __q17, __q18, __q19, __q20, __q21, __q22, __q23, + __q24, __q25, __q26, __q27, __q28, __q29, __q30, __q31 + }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_epi64x (long long __A, long long __B, long long __C, + long long __D) +{ + return __extension__ (__m256i)(__v4di){ __D, __C, __B, __A }; +} + +/* Create a vector with all elements equal to A. */ +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_pd (double __A) +{ + return __extension__ (__m256d){ __A, __A, __A, __A }; +} + +/* Create a vector with all elements equal to A. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_ps (float __A) +{ + return __extension__ (__m256){ __A, __A, __A, __A, + __A, __A, __A, __A }; +} + +/* Create a vector with all elements equal to A. 
*/ +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi32 (int __A) +{ + return __extension__ (__m256i)(__v8si){ __A, __A, __A, __A, + __A, __A, __A, __A }; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi16 (short __A) +{ + return _mm256_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi8 (char __A) +{ + return _mm256_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set1_epi64x (long long __A) +{ + return __extension__ (__m256i)(__v4di){ __A, __A, __A, __A }; +} + +/* Create vectors of elements in the reversed order from the + _mm256_set_XXX functions. 
*/ + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_pd (double __A, double __B, double __C, double __D) +{ + return _mm256_set_pd (__D, __C, __B, __A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H) +{ + return _mm256_set_ps (__H, __G, __F, __E, __D, __C, __B, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H) +{ + return _mm256_set_epi32 (__H, __G, __F, __E, __D, __C, __B, __A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi16 (short __q15, short __q14, short __q13, short __q12, + short __q11, short __q10, short __q09, short __q08, + short __q07, short __q06, short __q05, short __q04, + short __q03, short __q02, short __q01, short __q00) +{ + return _mm256_set_epi16 (__q00, __q01, __q02, __q03, + __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, + __q12, __q13, __q14, __q15); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi8 (char __q31, char __q30, char __q29, char __q28, + char __q27, char __q26, char __q25, char __q24, + char __q23, char __q22, char __q21, char __q20, + char __q19, char __q18, char __q17, char __q16, + char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return _mm256_set_epi8 (__q00, __q01, __q02, __q03, + __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, + __q12, __q13, __q14, __q15, + __q16, __q17, __q18, __q19, + __q20, __q21, __q22, __q23, + __q24, __q25, __q26, __q27, + __q28, __q29, __q30, 
__q31); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_epi64x (long long __A, long long __B, long long __C, + long long __D) +{ + return _mm256_set_epi64x (__D, __C, __B, __A); +} + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd_ps (__m256d __A) +{ + return (__m256) __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd_si256 (__m256d __A) +{ + return (__m256i) __A; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps_pd (__m256 __A) +{ + return (__m256d) __A; +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps_si256(__m256 __A) +{ + return (__m256i) __A; +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_ps (__m256i __A) +{ + return (__m256) __A; +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_pd (__m256i __A) +{ + return (__m256d) __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd256_pd128 (__m256d __A) +{ + return (__m128d) __builtin_ia32_pd_pd256 ((__v4df)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps256_ps128 (__m256 __A) +{ + return (__m128) __builtin_ia32_ps_ps256 ((__v8sf)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi256_si128 (__m256i __A) +{ + return (__m128i) __builtin_ia32_si_si256 ((__v8si)__A); +} + +/* When cast is done from a 128 to 256-bit type, the low 128 bits of + the 256-bit result contain 
source parameter value and the upper 128 + bits of the result are undefined. Those intrinsics shouldn't + generate any extra moves. */ + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castpd128_pd256 (__m128d __A) +{ + return (__m256d) __builtin_ia32_pd256_pd ((__v2df)__A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castps128_ps256 (__m128 __A) +{ + return (__m256) __builtin_ia32_ps256_ps ((__v4sf)__A); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_castsi128_si256 (__m128i __A) +{ + return (__m256i) __builtin_ia32_si256_si ((__v4si)__A); +} + +/* Similarly, but with zero extension instead of undefined values. */ + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zextpd128_pd256 (__m128d __A) +{ + return _mm256_insertf128_pd (_mm256_setzero_pd (), __A, 0); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zextps128_ps256 (__m128 __A) +{ + return _mm256_insertf128_ps (_mm256_setzero_ps (), __A, 0); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_zextsi128_si256 (__m128i __A) +{ + return _mm256_insertf128_si256 (_mm256_setzero_si256 (), __A, 0); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_m128 ( __m128 __H, __m128 __L) +{ + return _mm256_insertf128_ps (_mm256_castps128_ps256 (__L), __H, 1); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_m128d (__m128d __H, __m128d __L) +{ + return _mm256_insertf128_pd (_mm256_castpd128_pd256 (__L), __H, 1); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_set_m128i (__m128i __H, __m128i __L) +{ + return _mm256_insertf128_si256 
(_mm256_castsi128_si256 (__L), __H, 1); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_m128 (__m128 __L, __m128 __H) +{ + return _mm256_set_m128 (__H, __L); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_m128d (__m128d __L, __m128d __H) +{ + return _mm256_set_m128d (__H, __L); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_setr_m128i (__m128i __L, __m128i __H) +{ + return _mm256_set_m128i (__H, __L); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu2_m128 (float const *__PH, float const *__PL) +{ + return _mm256_insertf128_ps (_mm256_castps128_ps256 (_mm_loadu_ps (__PL)), + _mm_loadu_ps (__PH), 1); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu2_m128 (float *__PH, float *__PL, __m256 __A) +{ + _mm_storeu_ps (__PL, _mm256_castps256_ps128 (__A)); + _mm_storeu_ps (__PH, _mm256_extractf128_ps (__A, 1)); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu2_m128d (double const *__PH, double const *__PL) +{ + return _mm256_insertf128_pd (_mm256_castpd128_pd256 (_mm_loadu_pd (__PL)), + _mm_loadu_pd (__PH), 1); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_storeu2_m128d (double *__PH, double *__PL, __m256d __A) +{ + _mm_storeu_pd (__PL, _mm256_castpd256_pd128 (__A)); + _mm_storeu_pd (__PH, _mm256_extractf128_pd (__A, 1)); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_loadu2_m128i (__m128i_u const *__PH, __m128i_u const *__PL) +{ + return _mm256_insertf128_si256 (_mm256_castsi128_si256 (_mm_loadu_si128 (__PL)), + _mm_loadu_si128 (__PH), 1); +} + +extern __inline void __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm256_storeu2_m128i (__m128i_u *__PH, __m128i_u *__PL, __m256i __A) +{ + _mm_storeu_si128 (__PL, _mm256_castsi256_si128 (__A)); + _mm_storeu_si128 (__PH, _mm256_extractf128_si256 (__A, 1)); +} + +#ifdef __DISABLE_AVX__ +#undef __DISABLE_AVX__ +#pragma GCC pop_options +#endif /* __DISABLE_AVX__ */ + +#endif /* _AVXINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/avxvnniintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/avxvnniintrin.h new file mode 100644 index 0000000..4537ff0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/avxvnniintrin.h @@ -0,0 +1,113 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." 
+#endif + +#ifndef _AVXVNNIINTRIN_H_INCLUDED +#define _AVXVNNIINTRIN_H_INCLUDED + +#if !defined(__AVXVNNI__) +#pragma GCC push_options +#pragma GCC target("avxvnni") +#define __DISABLE_AVXVNNIVL__ +#endif /* __AVXVNNIVL__ */ + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dpbusd_avx_epi32(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpdpbusd_v8si ((__v8si) __A, + (__v8si) __B, + (__v8si) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dpbusd_avx_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpdpbusd_v4si ((__v4si) __A, + (__v4si) __B, + (__v4si) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dpbusds_avx_epi32(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpdpbusds_v8si ((__v8si) __A, + (__v8si) __B, + (__v8si) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dpbusds_avx_epi32(__m128i __A,__m128i __B,__m128i __C) +{ + return (__m128i) __builtin_ia32_vpdpbusds_v4si ((__v4si) __A, + (__v4si) __B, + (__v4si) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dpwssd_avx_epi32(__m256i __A,__m256i __B,__m256i __C) +{ + return (__m256i) __builtin_ia32_vpdpwssd_v8si ((__v8si) __A, + (__v8si) __B, + (__v8si) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dpwssd_avx_epi32(__m128i __A,__m128i __B,__m128i __C) +{ + return (__m128i) __builtin_ia32_vpdpwssd_v4si ((__v4si) __A, + (__v4si) __B, + (__v4si) __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_dpwssds_avx_epi32(__m256i __A,__m256i __B,__m256i __C) +{ + return (__m256i) __builtin_ia32_vpdpwssds_v8si ((__v8si) 
__A, + (__v8si) __B, + (__v8si) __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dpwssds_avx_epi32(__m128i __A,__m128i __B,__m128i __C) +{ + return (__m128i) __builtin_ia32_vpdpwssds_v4si ((__v4si) __A, + (__v4si) __B, + (__v4si) __C); +} + +#ifdef __DISABLE_AVXVNNIVL__ +#undef __DISABLE_AVXVNNIVL__ +#pragma GCC pop_options +#endif /* __DISABLE_AVXVNNIVL__ */ +#endif /* _AVXVNNIINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/bmi2intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/bmi2intrin.h new file mode 100644 index 0000000..747d336 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/bmi2intrin.h @@ -0,0 +1,109 @@ +/* Copyright (C) 2011-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _BMI2INTRIN_H_INCLUDED +#define _BMI2INTRIN_H_INCLUDED + +#ifndef __BMI2__ +#pragma GCC push_options +#pragma GCC target("bmi2") +#define __DISABLE_BMI2__ +#endif /* __BMI2__ */ + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_bzhi_u32 (unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_bzhi_si (__X, __Y); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_pdep_u32 (unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_pdep_si (__X, __Y); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_pext_u32 (unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_pext_si (__X, __Y); +} + +#ifdef __x86_64__ + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_bzhi_u64 (unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_bzhi_di (__X, __Y); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_pdep_u64 (unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_pdep_di (__X, __Y); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_pext_u64 (unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_pext_di (__X, __Y); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mulx_u64 (unsigned long long __X, unsigned long long __Y, + unsigned long long *__P) +{ + unsigned __int128 __res = (unsigned __int128) __X * __Y; + *__P = (unsigned long long) (__res >> 64); + return (unsigned long long) __res; +} + +#else /* !__x86_64__ */ + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P) +{ + 
unsigned long long __res = (unsigned long long) __X * __Y; + *__P = (unsigned int) (__res >> 32); + return (unsigned int) __res; +} + +#endif /* !__x86_64__ */ + +#ifdef __DISABLE_BMI2__ +#undef __DISABLE_BMI2__ +#pragma GCC pop_options +#endif /* __DISABLE_BMI2__ */ + +#endif /* _BMI2INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/bmiintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/bmiintrin.h new file mode 100644 index 0000000..f0303c8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/bmiintrin.h @@ -0,0 +1,202 @@ +/* Copyright (C) 2010-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _BMIINTRIN_H_INCLUDED +#define _BMIINTRIN_H_INCLUDED + +#ifndef __BMI__ +#pragma GCC push_options +#pragma GCC target("bmi") +#define __DISABLE_BMI__ +#endif /* __BMI__ */ + +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzcnt_u16 (unsigned short __X) +{ + return __builtin_ia32_tzcnt_u16 (__X); +} + +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tzcnt_u16 (unsigned short __X) +{ + return __builtin_ia32_tzcnt_u16 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__andn_u32 (unsigned int __X, unsigned int __Y) +{ + return ~__X & __Y; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_andn_u32 (unsigned int __X, unsigned int __Y) +{ + return __andn_u32 (__X, __Y); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextr_u32 (unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_bextr_u32 (__X, __Y); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_bextr_u32 (unsigned int __X, unsigned int __Y, unsigned __Z) +{ + return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8))); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsi_u32 (unsigned int __X) +{ + return __X & -__X; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_blsi_u32 (unsigned int __X) +{ + return __blsi_u32 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsmsk_u32 (unsigned int __X) +{ + return __X ^ (__X - 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_blsmsk_u32 (unsigned int __X) +{ + return 
__blsmsk_u32 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsr_u32 (unsigned int __X) +{ + return __X & (__X - 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_blsr_u32 (unsigned int __X) +{ + return __blsr_u32 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzcnt_u32 (unsigned int __X) +{ + return __builtin_ia32_tzcnt_u32 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tzcnt_u32 (unsigned int __X) +{ + return __builtin_ia32_tzcnt_u32 (__X); +} + + +#ifdef __x86_64__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__andn_u64 (unsigned long long __X, unsigned long long __Y) +{ + return ~__X & __Y; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_andn_u64 (unsigned long long __X, unsigned long long __Y) +{ + return __andn_u64 (__X, __Y); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextr_u64 (unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_bextr_u64 (__X, __Y); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_bextr_u64 (unsigned long long __X, unsigned int __Y, unsigned int __Z) +{ + return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8))); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsi_u64 (unsigned long long __X) +{ + return __X & -__X; +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_blsi_u64 (unsigned long long __X) +{ + return __blsi_u64 (__X); +} + +extern __inline unsigned long long 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsmsk_u64 (unsigned long long __X) +{ + return __X ^ (__X - 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_blsmsk_u64 (unsigned long long __X) +{ + return __blsmsk_u64 (__X); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsr_u64 (unsigned long long __X) +{ + return __X & (__X - 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_blsr_u64 (unsigned long long __X) +{ + return __blsr_u64 (__X); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzcnt_u64 (unsigned long long __X) +{ + return __builtin_ia32_tzcnt_u64 (__X); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tzcnt_u64 (unsigned long long __X) +{ + return __builtin_ia32_tzcnt_u64 (__X); +} + +#endif /* __x86_64__ */ + +#ifdef __DISABLE_BMI__ +#undef __DISABLE_BMI__ +#pragma GCC pop_options +#endif /* __DISABLE_BMI__ */ + +#endif /* _BMIINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/bmmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/bmmintrin.h new file mode 100644 index 0000000..ccc72b4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/bmmintrin.h @@ -0,0 +1,29 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _BMMINTRIN_H_INCLUDED +#define _BMMINTRIN_H_INCLUDED + +# error "SSE5 instruction set removed from compiler" + +#endif /* _BMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/cet.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/cet.h new file mode 100644 index 0000000..2d61e17 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/cet.h @@ -0,0 +1,93 @@ +/* ELF program property for Intel CET. + Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This file is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + */ + +/* Add x86 feature with IBT and/or SHSTK bits to ELF program property + if they are enabled. Otherwise, contents in this header file are + unused. 
Define _CET_ENDBR for assembly codes. _CET_ENDBR should be + placed unconditionally at the entrance of a function whose address + may be taken. */ + +#ifndef _CET_H_INCLUDED +#define _CET_H_INCLUDED + +#ifdef __ASSEMBLER__ + +# if defined __CET__ && (__CET__ & 1) != 0 +# ifdef __x86_64__ +# define _CET_ENDBR endbr64 +# else +# define _CET_ENDBR endbr32 +# endif +# else +# define _CET_ENDBR +# endif + +# ifdef __ELF__ +# ifdef __CET__ +# if (__CET__ & 1) != 0 +/* GNU_PROPERTY_X86_FEATURE_1_IBT. */ +# define __PROPERTY_IBT 0x1 +# else +# define __PROPERTY_IBT 0x0 +# endif + +# if (__CET__ & 2) != 0 +/* GNU_PROPERTY_X86_FEATURE_1_SHSTK. */ +# define __PROPERTY_SHSTK 0x2 +# else +# define __PROPERTY_SHSTK 0x0 +# endif + +# define __PROPERTY_BITS (__PROPERTY_IBT | __PROPERTY_SHSTK) + +# ifdef __LP64__ +# define __PROPERTY_ALIGN 3 +# else +# define __PROPERTY_ALIGN 2 +# endif + + .pushsection ".note.gnu.property", "a" + .p2align __PROPERTY_ALIGN + .long 1f - 0f /* name length. */ + .long 4f - 1f /* data length. */ + /* NT_GNU_PROPERTY_TYPE_0. */ + .long 5 /* note type. */ +0: + .asciz "GNU" /* vendor name. */ +1: + .p2align __PROPERTY_ALIGN + /* GNU_PROPERTY_X86_FEATURE_1_AND. */ + .long 0xc0000002 /* pr_type. */ + .long 3f - 2f /* pr_datasz. */ +2: + /* GNU_PROPERTY_X86_FEATURE_1_XXX. */ + .long __PROPERTY_BITS +3: + .p2align __PROPERTY_ALIGN +4: + .popsection +# endif /* __CET__ */ +# endif /* __ELF__ */ +#endif /* __ASSEMBLER__ */ + +#endif /* _CET_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/cetintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/cetintrin.h new file mode 100644 index 0000000..7abacb1 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/cetintrin.h @@ -0,0 +1,129 @@ +/* Copyright (C) 2015-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _CETINTRIN_H_INCLUDED +#define _CETINTRIN_H_INCLUDED + +#ifndef __SHSTK__ +#pragma GCC push_options +#pragma GCC target ("shstk") +#define __DISABLE_SHSTK__ +#endif /* __SHSTK__ */ + +#ifdef __x86_64__ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_get_ssp (void) +{ + return __builtin_ia32_rdsspq (); +} +#else +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_get_ssp (void) +{ + return __builtin_ia32_rdsspd (); +} +#endif + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_inc_ssp (unsigned int __B) +{ +#ifdef __x86_64__ + __builtin_ia32_incsspq ((unsigned long long) __B); +#else + __builtin_ia32_incsspd (__B); +#endif +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_saveprevssp (void) +{ + __builtin_ia32_saveprevssp (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_rstorssp (void *__B) +{ + __builtin_ia32_rstorssp (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wrssd (unsigned int __B, void *__C) +{ + __builtin_ia32_wrssd (__B, __C); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wrssq (unsigned long long __B, void *__C) +{ + __builtin_ia32_wrssq (__B, __C); +} +#endif + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wrussd (unsigned int __B, void *__C) +{ + __builtin_ia32_wrussd (__B, __C); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wrussq (unsigned long long __B, void *__C) +{ + __builtin_ia32_wrussq (__B, __C); +} +#endif + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_setssbsy (void) +{ + __builtin_ia32_setssbsy (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_clrssbsy (void *__B) +{ + __builtin_ia32_clrssbsy (__B); +} + +#ifdef __DISABLE_SHSTK__ +#undef __DISABLE_SHSTK__ +#pragma GCC pop_options +#endif /* __DISABLE_SHSTK__ */ + +#endif /* _CETINTRIN_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/cldemoteintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/cldemoteintrin.h new file mode 100644 index 0000000..ca7bb72 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/cldemoteintrin.h @@ -0,0 +1,47 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _CLDEMOTE_H_INCLUDED +#define _CLDEMOTE_H_INCLUDED + +#ifndef __CLDEMOTE__ +#pragma GCC push_options +#pragma GCC target("cldemote") +#define __DISABLE_CLDEMOTE__ +#endif /* __CLDEMOTE__ */ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_cldemote (void *__A) +{ + __builtin_ia32_cldemote (__A); +} +#ifdef __DISABLE_CLDEMOTE__ +#undef __DISABLE_CLDEMOTE__ +#pragma GCC pop_options +#endif /* __DISABLE_CLDEMOTE__ */ + +#endif /* _CLDEMOTE_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/clflushoptintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/clflushoptintrin.h new file mode 100644 index 0000000..c6949b5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/clflushoptintrin.h @@ -0,0 +1,49 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _CLFLUSHOPTINTRIN_H_INCLUDED +#define _CLFLUSHOPTINTRIN_H_INCLUDED + +#ifndef __CLFLUSHOPT__ +#pragma GCC push_options +#pragma GCC target("clflushopt") +#define __DISABLE_CLFLUSHOPT__ +#endif /* __CLFLUSHOPT__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clflushopt (void *__A) +{ + __builtin_ia32_clflushopt (__A); +} + +#ifdef __DISABLE_CLFLUSHOPT__ +#undef __DISABLE_CLFLUSHOPT__ +#pragma GCC pop_options +#endif /* __DISABLE_CLFLUSHOPT__ */ + +#endif /* _CLFLUSHOPTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/clwbintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/clwbintrin.h new file mode 100644 index 0000000..dd84a4f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/clwbintrin.h @@ -0,0 +1,49 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _CLWBINTRIN_H_INCLUDED +#define _CLWBINTRIN_H_INCLUDED + +#ifndef __CLWB__ +#pragma GCC push_options +#pragma GCC target("clwb") +#define __DISABLE_CLWB__ +#endif /* __CLWB__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clwb (void *__A) +{ + __builtin_ia32_clwb (__A); +} + +#ifdef __DISABLE_CLWB__ +#undef __DISABLE_CLWB__ +#pragma GCC pop_options +#endif /* __DISABLE_CLWB__ */ + +#endif /* _CLWBINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/clzerointrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/clzerointrin.h new file mode 100644 index 0000000..ce4f286 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/clzerointrin.h @@ -0,0 +1,44 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _CLZEROINTRIN_H_INCLUDED +#define _CLZEROINTRIN_H_INCLUDED + +#ifndef __CLZERO__ +#pragma GCC push_options +#pragma GCC target("clzero") +#define __DISABLE_CLZERO__ +#endif /* __CLZERO__ */ + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clzero (void * __I) +{ + __builtin_ia32_clzero (__I); +} + +#ifdef __DISABLE_CLZERO__ +#undef __DISABLE_CLZERO__ +#pragma GCC pop_options +#endif /* __DISABLE_CLZERO__ */ + +#endif /* _CLZEROINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/cpuid.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/cpuid.h new file mode 100644 index 0000000..8b3dc2b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/cpuid.h @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2007-2022 Free Software Foundation, Inc. + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 3, or (at your option) any + * later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Under Section 7 of GPL version 3, you are granted additional + * permissions described in the GCC Runtime Library Exception, version + * 3.1, as published by the Free Software Foundation. 
+ * + * You should have received a copy of the GNU General Public License and + * a copy of the GCC Runtime Library Exception along with this program; + * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + * . + */ + +#ifndef _CPUID_H_INCLUDED +#define _CPUID_H_INCLUDED + +/* %eax */ +#define bit_AVXVNNI (1 << 4) +#define bit_AVX512BF16 (1 << 5) +#define bit_HRESET (1 << 22) + +/* %ecx */ +#define bit_SSE3 (1 << 0) +#define bit_PCLMUL (1 << 1) +#define bit_LZCNT (1 << 5) +#define bit_SSSE3 (1 << 9) +#define bit_FMA (1 << 12) +#define bit_CMPXCHG16B (1 << 13) +#define bit_SSE4_1 (1 << 19) +#define bit_SSE4_2 (1 << 20) +#define bit_MOVBE (1 << 22) +#define bit_POPCNT (1 << 23) +#define bit_AES (1 << 25) +#define bit_XSAVE (1 << 26) +#define bit_OSXSAVE (1 << 27) +#define bit_AVX (1 << 28) +#define bit_F16C (1 << 29) +#define bit_RDRND (1 << 30) + +/* %edx */ +#define bit_CMPXCHG8B (1 << 8) +#define bit_CMOV (1 << 15) +#define bit_MMX (1 << 23) +#define bit_FXSAVE (1 << 24) +#define bit_SSE (1 << 25) +#define bit_SSE2 (1 << 26) + +/* Extended Features (%eax == 0x80000001) */ +/* %ecx */ +#define bit_LAHF_LM (1 << 0) +#define bit_ABM (1 << 5) +#define bit_SSE4a (1 << 6) +#define bit_PRFCHW (1 << 8) +#define bit_XOP (1 << 11) +#define bit_LWP (1 << 15) +#define bit_FMA4 (1 << 16) +#define bit_TBM (1 << 21) +#define bit_MWAITX (1 << 29) + +/* %edx */ +#define bit_MMXEXT (1 << 22) +#define bit_LM (1 << 29) +#define bit_3DNOWP (1 << 30) +#define bit_3DNOW (1u << 31) + +/* %ebx */ +#define bit_CLZERO (1 << 0) +#define bit_WBNOINVD (1 << 9) + +/* Extended Features (%eax == 7) */ +/* %ebx */ +#define bit_FSGSBASE (1 << 0) +#define bit_SGX (1 << 2) +#define bit_BMI (1 << 3) +#define bit_HLE (1 << 4) +#define bit_AVX2 (1 << 5) +#define bit_BMI2 (1 << 8) +#define bit_RTM (1 << 11) +#define bit_AVX512F (1 << 16) +#define bit_AVX512DQ (1 << 17) +#define bit_RDSEED (1 << 18) +#define bit_ADX (1 << 19) +#define bit_AVX512IFMA (1 << 21) +#define bit_CLFLUSHOPT (1 
<< 23) +#define bit_CLWB (1 << 24) +#define bit_AVX512PF (1 << 26) +#define bit_AVX512ER (1 << 27) +#define bit_AVX512CD (1 << 28) +#define bit_SHA (1 << 29) +#define bit_AVX512BW (1 << 30) +#define bit_AVX512VL (1u << 31) + +/* %ecx */ +#define bit_PREFETCHWT1 (1 << 0) +#define bit_AVX512VBMI (1 << 1) +#define bit_PKU (1 << 3) +#define bit_OSPKE (1 << 4) +#define bit_WAITPKG (1 << 5) +#define bit_AVX512VBMI2 (1 << 6) +#define bit_SHSTK (1 << 7) +#define bit_GFNI (1 << 8) +#define bit_VAES (1 << 9) +#define bit_AVX512VNNI (1 << 11) +#define bit_VPCLMULQDQ (1 << 10) +#define bit_AVX512BITALG (1 << 12) +#define bit_AVX512VPOPCNTDQ (1 << 14) +#define bit_RDPID (1 << 22) +#define bit_MOVDIRI (1 << 27) +#define bit_MOVDIR64B (1 << 28) +#define bit_ENQCMD (1 << 29) +#define bit_CLDEMOTE (1 << 25) +#define bit_KL (1 << 23) + +/* %edx */ +#define bit_AVX5124VNNIW (1 << 2) +#define bit_AVX5124FMAPS (1 << 3) +#define bit_AVX512VP2INTERSECT (1 << 8) +#define bit_AVX512FP16 (1 << 23) +#define bit_IBT (1 << 20) +#define bit_UINTR (1 << 5) +#define bit_PCONFIG (1 << 18) +#define bit_SERIALIZE (1 << 14) +#define bit_TSXLDTRK (1 << 16) +#define bit_AMX_BF16 (1 << 22) +#define bit_AMX_TILE (1 << 24) +#define bit_AMX_INT8 (1 << 25) + +/* Extended State Enumeration Sub-leaf (%eax == 0xd, %ecx == 1) */ +#define bit_XSAVEOPT (1 << 0) +#define bit_XSAVEC (1 << 1) +#define bit_XSAVES (1 << 3) + +/* PT sub leaf (%eax == 0x14, %ecx == 0) */ +/* %ebx */ +#define bit_PTWRITE (1 << 4) + +/* Keylocker leaf (%eax == 0x19) */ +/* %ebx */ +#define bit_AESKLE ( 1<<0 ) +#define bit_WIDEKL ( 1<<2 ) + + +/* Signatures for different CPU implementations as returned in uses + of cpuid with level 0. 
*/ +#define signature_AMD_ebx 0x68747541 +#define signature_AMD_ecx 0x444d4163 +#define signature_AMD_edx 0x69746e65 + +#define signature_CENTAUR_ebx 0x746e6543 +#define signature_CENTAUR_ecx 0x736c7561 +#define signature_CENTAUR_edx 0x48727561 + +#define signature_CYRIX_ebx 0x69727943 +#define signature_CYRIX_ecx 0x64616574 +#define signature_CYRIX_edx 0x736e4978 + +#define signature_INTEL_ebx 0x756e6547 +#define signature_INTEL_ecx 0x6c65746e +#define signature_INTEL_edx 0x49656e69 + +#define signature_TM1_ebx 0x6e617254 +#define signature_TM1_ecx 0x55504361 +#define signature_TM1_edx 0x74656d73 + +#define signature_TM2_ebx 0x756e6547 +#define signature_TM2_ecx 0x3638784d +#define signature_TM2_edx 0x54656e69 + +#define signature_NSC_ebx 0x646f6547 +#define signature_NSC_ecx 0x43534e20 +#define signature_NSC_edx 0x79622065 + +#define signature_NEXGEN_ebx 0x4778654e +#define signature_NEXGEN_ecx 0x6e657669 +#define signature_NEXGEN_edx 0x72446e65 + +#define signature_RISE_ebx 0x65736952 +#define signature_RISE_ecx 0x65736952 +#define signature_RISE_edx 0x65736952 + +#define signature_SIS_ebx 0x20536953 +#define signature_SIS_ecx 0x20536953 +#define signature_SIS_edx 0x20536953 + +#define signature_UMC_ebx 0x20434d55 +#define signature_UMC_ecx 0x20434d55 +#define signature_UMC_edx 0x20434d55 + +#define signature_VIA_ebx 0x20414956 +#define signature_VIA_ecx 0x20414956 +#define signature_VIA_edx 0x20414956 + +#define signature_VORTEX_ebx 0x74726f56 +#define signature_VORTEX_ecx 0x436f5320 +#define signature_VORTEX_edx 0x36387865 + +#ifndef __x86_64__ +/* At least one cpu (Winchip 2) does not set %ebx and %ecx + for cpuid leaf 1. Forcibly zero the two registers before + calling cpuid as a precaution. 
*/ +#define __cpuid(level, a, b, c, d) \ + do { \ + if (__builtin_constant_p (level) && (level) != 1) \ + __asm__ __volatile__ ("cpuid\n\t" \ + : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \ + : "0" (level)); \ + else \ + __asm__ __volatile__ ("cpuid\n\t" \ + : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \ + : "0" (level), "1" (0), "2" (0)); \ + } while (0) +#else +#define __cpuid(level, a, b, c, d) \ + __asm__ __volatile__ ("cpuid\n\t" \ + : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \ + : "0" (level)) +#endif + +#define __cpuid_count(level, count, a, b, c, d) \ + __asm__ __volatile__ ("cpuid\n\t" \ + : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \ + : "0" (level), "2" (count)) + + +/* Return highest supported input value for cpuid instruction. ext can + be either 0x0 or 0x80000000 to return highest supported value for + basic or extended cpuid information. Function returns 0 if cpuid + is not supported or whatever cpuid returns in eax register. If sig + pointer is non-null, then first four bytes of the signature + (as found in ebx register) are returned in location pointed by sig. */ + +static __inline unsigned int +__get_cpuid_max (unsigned int __ext, unsigned int *__sig) +{ + unsigned int __eax, __ebx, __ecx, __edx; + +#ifndef __x86_64__ + /* See if we can use cpuid. On AMD64 we always can. */ +#if __GNUC__ >= 3 + __asm__ ("pushf{l|d}\n\t" + "pushf{l|d}\n\t" + "pop{l}\t%0\n\t" + "mov{l}\t{%0, %1|%1, %0}\n\t" + "xor{l}\t{%2, %0|%0, %2}\n\t" + "push{l}\t%0\n\t" + "popf{l|d}\n\t" + "pushf{l|d}\n\t" + "pop{l}\t%0\n\t" + "popf{l|d}\n\t" + : "=&r" (__eax), "=&r" (__ebx) + : "i" (0x00200000)); +#else +/* Host GCCs older than 3.0 weren't supporting Intel asm syntax + nor alternatives in i386 code. 
*/ + __asm__ ("pushfl\n\t" + "pushfl\n\t" + "popl\t%0\n\t" + "movl\t%0, %1\n\t" + "xorl\t%2, %0\n\t" + "pushl\t%0\n\t" + "popfl\n\t" + "pushfl\n\t" + "popl\t%0\n\t" + "popfl\n\t" + : "=&r" (__eax), "=&r" (__ebx) + : "i" (0x00200000)); +#endif + + if (!((__eax ^ __ebx) & 0x00200000)) + return 0; +#endif + + /* Host supports cpuid. Return highest supported cpuid input value. */ + __cpuid (__ext, __eax, __ebx, __ecx, __edx); + + if (__sig) + *__sig = __ebx; + + return __eax; +} + +/* Return cpuid data for requested cpuid leaf, as found in returned + eax, ebx, ecx and edx registers. The function checks if cpuid is + supported and returns 1 for valid cpuid information or 0 for + unsupported cpuid leaf. All pointers are required to be non-null. */ + +static __inline int +__get_cpuid (unsigned int __leaf, + unsigned int *__eax, unsigned int *__ebx, + unsigned int *__ecx, unsigned int *__edx) +{ + unsigned int __ext = __leaf & 0x80000000; + unsigned int __maxlevel = __get_cpuid_max (__ext, 0); + + if (__maxlevel == 0 || __maxlevel < __leaf) + return 0; + + __cpuid (__leaf, *__eax, *__ebx, *__ecx, *__edx); + return 1; +} + +/* Same as above, but sub-leaf can be specified. 
*/ + +static __inline int +__get_cpuid_count (unsigned int __leaf, unsigned int __subleaf, + unsigned int *__eax, unsigned int *__ebx, + unsigned int *__ecx, unsigned int *__edx) +{ + unsigned int __ext = __leaf & 0x80000000; + unsigned int __maxlevel = __get_cpuid_max (__ext, 0); + + if (__maxlevel == 0 || __maxlevel < __leaf) + return 0; + + __cpuid_count (__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx); + return 1; +} + +static __inline void +__cpuidex (int __cpuid_info[4], int __leaf, int __subleaf) +{ + __cpuid_count (__leaf, __subleaf, __cpuid_info[0], __cpuid_info[1], + __cpuid_info[2], __cpuid_info[3]); +} + +#endif /* _CPUID_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/cross-stdarg.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/cross-stdarg.h new file mode 100644 index 0000000..f07d01a --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/cross-stdarg.h @@ -0,0 +1,72 @@ +/* Copyright (C) 2002-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef __CROSS_STDARG_H_INCLUDED +#define __CROSS_STDARG_H_INCLUDED + +/* Make sure that for non x64 targets cross builtins are defined. */ +#ifndef __x86_64__ +/* Call abi ms_abi. */ +#define __builtin_ms_va_list __builtin_va_list +#define __builtin_ms_va_copy __builtin_va_copy +#define __builtin_ms_va_start __builtin_va_start +#define __builtin_ms_va_end __builtin_va_end + +/* Call abi sysv_abi. */ +#define __builtin_sysv_va_list __builtin_va_list +#define __builtin_sysv_va_copy __builtin_va_copy +#define __builtin_sysv_va_start __builtin_va_start +#define __builtin_sysv_va_end __builtin_va_end +#endif + +#define __ms_va_copy(__d,__s) __builtin_ms_va_copy(__d,__s) +#define __ms_va_start(__v,__l) __builtin_ms_va_start(__v,__l) +#define __ms_va_arg(__v,__l) __builtin_va_arg(__v,__l) +#define __ms_va_end(__v) __builtin_ms_va_end(__v) + +#define __sysv_va_copy(__d,__s) __builtin_sysv_va_copy(__d,__s) +#define __sysv_va_start(__v,__l) __builtin_sysv_va_start(__v,__l) +#define __sysv_va_arg(__v,__l) __builtin_va_arg(__v,__l) +#define __sysv_va_end(__v) __builtin_sysv_va_end(__v) + +#ifndef __GNUC_SYSV_VA_LIST +#define __GNUC_SYSV_VA_LIST + typedef __builtin_sysv_va_list __gnuc_sysv_va_list; +#endif + +#ifndef _SYSV_VA_LIST_DEFINED +#define _SYSV_VA_LIST_DEFINED + typedef __gnuc_sysv_va_list sysv_va_list; +#endif + +#ifndef __GNUC_MS_VA_LIST +#define __GNUC_MS_VA_LIST + typedef __builtin_ms_va_list __gnuc_ms_va_list; +#endif + +#ifndef _MS_VA_LIST_DEFINED +#define _MS_VA_LIST_DEFINED + typedef __gnuc_ms_va_list ms_va_list; +#endif + +#endif /* __CROSS_STDARG_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/emmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/emmintrin.h new file mode 100644 index 0000000..654a8e8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/emmintrin.h @@ -0,0 +1,1602 @@ +/* Copyright (C) 2003-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _EMMINTRIN_H_INCLUDED +#define _EMMINTRIN_H_INCLUDED + +/* We need definitions from the SSE header files*/ +#include + +#ifndef __SSE2__ +#pragma GCC push_options +#pragma GCC target("sse2") +#define __DISABLE_SSE2__ +#endif /* __SSE2__ */ + +/* SSE2 */ +typedef double __v2df __attribute__ ((__vector_size__ (16))); +typedef long long __v2di __attribute__ ((__vector_size__ (16))); +typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16))); +typedef int __v4si __attribute__ ((__vector_size__ (16))); +typedef unsigned int __v4su __attribute__ ((__vector_size__ (16))); +typedef short __v8hi __attribute__ ((__vector_size__ (16))); +typedef unsigned short __v8hu __attribute__ ((__vector_size__ (16))); +typedef char __v16qi __attribute__ ((__vector_size__ (16))); +typedef signed char __v16qs __attribute__ ((__vector_size__ (16))); +typedef unsigned char __v16qu __attribute__ ((__vector_size__ (16))); + +/* The Intel API is flexible enough that we must allow aliasing with 
other + vector types, and their scalar components. */ +typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); +typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Unaligned version of the same types. */ +typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1))); +typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1))); + +/* Create a selector for use with the SHUFPD instruction. */ +#define _MM_SHUFFLE2(fp1,fp0) \ + (((fp1) << 1) | (fp0)) + +/* Create a vector with element 0 as F and the rest zero. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_sd (double __F) +{ + return __extension__ (__m128d){ __F, 0.0 }; +} + +/* Create a vector with both elements equal to F. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pd (double __F) +{ + return __extension__ (__m128d){ __F, __F }; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pd1 (double __F) +{ + return _mm_set1_pd (__F); +} + +/* Create a vector with the lower value X and upper value W. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __X, __W }; +} + +/* Create a vector with the lower value W and upper value X. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pd (double __W, double __X) +{ + return __extension__ (__m128d){ __W, __X }; +} + +/* Create an undefined vector. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_pd (void) +{ + __m128d __Y = __Y; + return __Y; +} + +/* Create a vector of zeros. 
*/ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_pd (void) +{ + return __extension__ (__m128d){ 0.0, 0.0 }; +} + +/* Sets the low DPFP value of A from the low value of B. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_sd (__m128d __A, __m128d __B) +{ + return __extension__ (__m128d) __builtin_shuffle ((__v2df)__A, (__v2df)__B, (__v2di){2, 1}); +} + +/* Load two DPFP values from P. The address must be 16-byte aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_pd (double const *__P) +{ + return *(__m128d *)__P; +} + +/* Load two DPFP values from P. The address need not be 16-byte aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_pd (double const *__P) +{ + return *(__m128d_u *)__P; +} + +/* Create a vector with all two elements equal to *P. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load1_pd (double const *__P) +{ + return _mm_set1_pd (*__P); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_sd (double const *__P) +{ + return _mm_set_sd (*__P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_pd1 (double const *__P) +{ + return _mm_load1_pd (__P); +} + +/* Load two DPFP values in reverse order. The address must be aligned. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadr_pd (double const *__P) +{ + __m128d __tmp = _mm_load_pd (__P); + return __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1)); +} + +/* Store two DPFP values. The address must be 16-byte aligned. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_pd (double *__P, __m128d __A) +{ + *(__m128d *)__P = __A; +} + +/* Store two DPFP values. The address need not be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_pd (double *__P, __m128d __A) +{ + *(__m128d_u *)__P = __A; +} + +/* Stores the lower DPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_sd (double *__P, __m128d __A) +{ + *__P = ((__v2df)__A)[0]; +} + +extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_f64 (__m128d __A) +{ + return ((__v2df)__A)[0]; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_pd (double *__P, __m128d __A) +{ + _mm_store_sd (__P, __A); +} + +/* Stores the upper DPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeh_pd (double *__P, __m128d __A) +{ + *__P = ((__v2df)__A)[1]; +} + +/* Store the lower DPFP value across two words. + The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store1_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,0))); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_pd1 (double *__P, __m128d __A) +{ + _mm_store1_pd (__P, __A); +} + +/* Store two DPFP values in reverse order. The address must be aligned. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storer_pd (double *__P, __m128d __A) +{ + _mm_store_pd (__P, __builtin_ia32_shufpd (__A, __A, _MM_SHUFFLE2 (0,1))); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si32 (__m128i __A) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__A, 0); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si64 (__m128i __A) +{ + return ((__v2di)__A)[0]; +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi128_si64x (__m128i __A) +{ + return ((__v2di)__A)[0]; +} +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A + (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A - (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A * (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B); +} + +extern 
__inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_pd (__m128d __A, __m128d __B) +{ + return (__m128d) ((__v2df)__A / (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_pd (__m128d __A) +{ + return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A); +} + +/* Return pair {sqrt (B[0]), A[1]}. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_sd (__m128d __A, __m128d __B) +{ + __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B); + return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) 
+_mm_andnot_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B); +} 
+ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpltsd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmplesd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnltsd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df) __A, + (__v2df) + __builtin_ia32_cmpnlesd ((__v2df) __B, + (__v2df) + __A)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_sd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_sd (__m128d __A, __m128d __B) +{ + return 
__builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_sd (__m128d __A, __m128d __B) +{ + 
return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_sd (__m128d __A, __m128d __B) +{ + return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B); +} + +/* Create a vector of Qi, where i is the element number. */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi64x (long long __q1, long long __q0) +{ + return __extension__ (__m128i)(__v2di){ __q0, __q1 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi64 (__m64 __q1, __m64 __q0) +{ + return _mm_set_epi64x ((long long)__q1, (long long)__q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0) +{ + return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4, + short __q3, short __q2, short __q1, short __q0) +{ + return __extension__ (__m128i)(__v8hi){ + __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 }; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12, + char __q11, char __q10, char __q09, char __q08, + char __q07, char __q06, char __q05, char __q04, + char __q03, char __q02, char __q01, char __q00) +{ + return __extension__ (__m128i)(__v16qi){ + __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07, + __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15 + }; +} + +/* Set all of the elements of the vector to A. 
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi64x (long long __A) +{ + return _mm_set_epi64x (__A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi64 (__m64 __A) +{ + return _mm_set_epi64 (__A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi32 (int __A) +{ + return _mm_set_epi32 (__A, __A, __A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi16 (short __A) +{ + return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_epi8 (char __A) +{ + return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A, + __A, __A, __A, __A, __A, __A, __A, __A); +} + +/* Create a vector of Qi, where i is the element number. + The parameter order is reversed from the _mm_set_epi* functions. 
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi64 (__m64 __q0, __m64 __q1) +{ + return _mm_set_epi64 (__q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3) +{ + return _mm_set_epi32 (__q3, __q2, __q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3, + short __q4, short __q5, short __q6, short __q7) +{ + return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03, + char __q04, char __q05, char __q06, char __q07, + char __q08, char __q09, char __q10, char __q11, + char __q12, char __q13, char __q14, char __q15) +{ + return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08, + __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00); +} + +/* Create a vector with element 0 as *P and the rest zero. 
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_si128 (__m128i const *__P) +{ + return *__P; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_si128 (__m128i_u const *__P) +{ + return *__P; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_epi64 (__m128i_u const *__P) +{ + return _mm_set_epi64 ((__m64)0LL, *(__m64_u *)__P); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_si64 (void const *__P) +{ + return _mm_loadl_epi64 ((__m128i_u *)__P); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_si32 (void const *__P) +{ + return _mm_set_epi32 (0, 0, 0, (*(__m32_u *)__P)[0]); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_si16 (void const *__P) +{ + return _mm_set_epi16 (0, 0, 0, 0, 0, 0, 0, (*(__m16_u *)__P)[0]); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_si128 (__m128i *__P, __m128i __B) +{ + *__P = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_si128 (__m128i_u *__P, __m128i __B) +{ + *__P = __B; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_epi64 (__m128i_u *__P, __m128i __B) +{ + *(__m64_u *)__P = (__m64) ((__v2di)__B)[0]; +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_si64 (void *__P, __m128i __B) +{ + _mm_storel_epi64 ((__m128i_u *)__P, __B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_si32 (void *__P, __m128i __B) +{ + *(__m32_u *)__P = (__m32) ((__v4si)__B)[0]; +} + +extern __inline void 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_si16 (void *__P, __m128i __B) +{ + *(__m16_u *)__P = (__m16) ((__v8hi)__B)[0]; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movepi64_pi64 (__m128i __B) +{ + return (__m64) ((__v2di)__B)[0]; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movpi64_epi64 (__m64 __A) +{ + return _mm_set_epi64 ((__m64)0LL, __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_epi64 (__m128i __A) +{ + return (__m128i)__builtin_ia32_movq128 ((__v2di) __A); +} + +/* Create an undefined vector. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_si128 (void) +{ + __m128i __Y = __Y; + return __Y; +} + +/* Create a vector of zeros. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_si128 (void) +{ + return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 }; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_pd (__m128i __A) +{ + return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_ps (__m128i __A) +{ + return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpd_ps (__m128d __A) +{ + return 
(__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_epi32 (__m128d __A) +{ + return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttpd_pi32 (__m128d __A) +{ + return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32_pd (__m64 __A) +{ + return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_epi32 (__m128 __A) +{ + return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pd (__m128 __A) +{ + return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. 
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvtsd2si64 ((__v2df) __A); +} +#endif + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si32 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si ((__v2df) __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si64 (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttsd_si64x (__m128d __A) +{ + return __builtin_ia32_cvttsd2si64 ((__v2df) __A); +} +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsd_ss (__m128 __A, __m128d __B) +{ + return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_sd (__m128d __A, int __B) +{ + return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} + +/* Microsoft intrinsic. 
*/ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_sd (__m128d __A, long long __B) +{ + return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B); +} +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_sd (__m128d __A, __m128 __B) +{ + return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) +{ + return (__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, __mask); +} +#else +#define _mm_shuffle_pd(A, B, N) \ + ((__m128d)__builtin_ia32_shufpd ((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(N))) +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pd (__m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadh_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, __B); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_pd (__m128d __A, double const *__B) +{ + return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, __B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_pd (__m128d __A) +{ + return __builtin_ia32_movmskpd ((__v2df)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_epi16 (__m128i __A, __m128i __B) +{ + 
return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packus_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi32 (__m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qu)__A + (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hu)__A + (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4su)__A + (__v4su)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A + (__v2du)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_sub_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qu)__A - (__v16qu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hu)__A - (__v8hu)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4su)__A - (__v4su)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A - (__v2du)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline 
__m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hu)__A * (__v8hu)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_su32 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_bsrli_si128 (__m128i __A, const int __N) +{ + return (__m128i)__builtin_ia32_psrldqi128 (__A, __N * 8); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_bslli_si128 (__m128i __A, const int __N) +{ + return 
(__m128i)__builtin_ia32_pslldqi128 (__A, __N * 8); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_si128 (__m128i __A, const int __N) +{ + return (__m128i)__builtin_ia32_psrldqi128 (__A, __N * 8); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_si128 (__m128i __A, const int __N) +{ + return (__m128i)__builtin_ia32_pslldqi128 (__A, __N * 8); +} +#else +#define _mm_bsrli_si128(A, N) \ + ((__m128i)__builtin_ia32_psrldqi128 ((__m128i)(A), (int)(N) * 8)) +#define _mm_bslli_si128(A, N) \ + ((__m128i)__builtin_ia32_pslldqi128 ((__m128i)(A), (int)(N) * 8)) +#define _mm_srli_si128(A, N) \ + ((__m128i)__builtin_ia32_psrldqi128 ((__m128i)(A), (int)(N) * 8)) +#define _mm_slli_si128(A, N) \ + ((__m128i)__builtin_ia32_pslldqi128 ((__m128i)(A), (int)(N) * 8)) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi16 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi32 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_epi64 (__m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllw128((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pslld128((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +_mm_sll_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllq128((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_si128 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A & (__v2du)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_si128 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_si128 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A | (__v2du)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_si128 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v2du)__A ^ (__v2du)__B); +} + +extern __inline 
__m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qi)__A == (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hi)__A == (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4si)__A == (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qs)__A < (__v16qs)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hi)__A < (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4si)__A < (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v16qs)__A > (__v16qs)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v8hi)__A > (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i) ((__v4si)__A > (__v4si)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi16 (__m128i const __A, int const __N) +{ + return (unsigned short) __builtin_ia32_vec_ext_v8hi ((__v8hi)__A, __N); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi16 (__m128i const __A, int const __D, int const __N) +{ + return (__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)__A, __D, __N); +} +#else +#define _mm_extract_epi16(A, N) \ + ((int) (unsigned short) __builtin_ia32_vec_ext_v8hi ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_insert_epi16(A, D, N) \ + ((__m128i) __builtin_ia32_vec_set_v8hi ((__v8hi)(__m128i)(A), \ + (int)(D), (int)(N))) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_epi8 (__m128i __A) +{ + return __builtin_ia32_pmovmskb128 ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shufflehi_epi16 (__m128i __A, const int __mask) +{ + return (__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __mask); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shufflelo_epi16 (__m128i __A, const int __mask) +{ + return (__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __mask); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_epi32 (__m128i __A, const int __mask) +{ + return (__m128i)__builtin_ia32_pshufd ((__v4si)__A, __mask); +} +#else +#define _mm_shufflehi_epi16(A, N) \ + ((__m128i)__builtin_ia32_pshufhw ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_shufflelo_epi16(A, N) \ + ((__m128i)__builtin_ia32_pshuflw ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_shuffle_epi32(A, N) \ + ((__m128i)__builtin_ia32_pshufd ((__v4si)(__m128i)(A), (int)(N))) +#endif + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C) +{ + __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_epu16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sad_epu8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si32 (int *__A, int __B) +{ + __builtin_ia32_movnti (__A, __B); +} + +#ifdef __x86_64__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si64 (long long int *__A, long long int __B) +{ + __builtin_ia32_movnti64 (__A, __B); +} +#endif + +extern __inline 
void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_si128 (__m128i *__A, __m128i __B) +{ + __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_pd (double *__A, __m128d __B) +{ + __builtin_ia32_movntpd (__A, (__v2df)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clflush (void const *__A) +{ + __builtin_ia32_clflush (__A); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lfence (void) +{ + __builtin_ia32_lfence (); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mfence (void) +{ + __builtin_ia32_mfence (); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_si128 (int __A) +{ + return _mm_set_epi32 (0, 0, 0, __A); +} + +#ifdef __x86_64__ +/* Intel intrinsic. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} + +/* Microsoft intrinsic. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_si128 (long long __A) +{ + return _mm_set_epi64x (0, __A); +} +#endif + +/* Casts between various SP, DP, INT vector types. Note that these do no + conversion of values, they just change the type. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castpd_ps(__m128d __A) +{ + return (__m128) __A; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castpd_si128(__m128d __A) +{ + return (__m128i) __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castps_pd(__m128 __A) +{ + return (__m128d) __A; +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castps_si128(__m128 __A) +{ + return (__m128i) __A; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castsi128_ps(__m128i __A) +{ + return (__m128) __A; +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_castsi128_pd(__m128i __A) +{ + return (__m128d) __A; +} + +#ifdef __DISABLE_SSE2__ +#undef __DISABLE_SSE2__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE2__ */ + +#endif /* _EMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/enqcmdintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/enqcmdintrin.h new file mode 100644 index 0000000..25e5189 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/enqcmdintrin.h @@ -0,0 +1,55 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _ENQCMDINTRIN_H_INCLUDED +#define _ENQCMDINTRIN_H_INCLUDED + +#ifndef __ENQCMD__ +#pragma GCC push_options +#pragma GCC target ("enqcmd") +#define __DISABLE_ENQCMD__ +#endif /* __ENQCMD__ */ + +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_enqcmd (void * __P, const void * __Q) +{ + return __builtin_ia32_enqcmd (__P, __Q); +} + +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_enqcmds (void * __P, const void * __Q) +{ + return __builtin_ia32_enqcmds (__P, __Q); +} + +#ifdef __DISABLE_ENQCMD__ +#undef __DISABLE_ENQCMD__ +#pragma GCC pop_options +#endif /* __DISABLE_ENQCMD__ */ +#endif /* _ENQCMDINTRIN_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/f16cintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/f16cintrin.h new file mode 100644 index 0000000..1e221c6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/f16cintrin.h @@ -0,0 +1,98 @@ +/* Copyright (C) 2011-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _X86INTRIN_H_INCLUDED && !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include or instead." +#endif + +#ifndef _F16CINTRIN_H_INCLUDED +#define _F16CINTRIN_H_INCLUDED + +#ifndef __F16C__ +#pragma GCC push_options +#pragma GCC target("f16c") +#define __DISABLE_F16C__ +#endif /* __F16C__ */ + +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_cvtsh_ss (unsigned short __S) +{ + __v8hi __H = __extension__ (__v8hi){ (short) __S, 0, 0, 0, 0, 0, 0, 0 }; + __v4sf __A = __builtin_ia32_vcvtph2ps (__H); + return __builtin_ia32_vec_ext_v4sf (__A, 0); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtph_ps (__m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps ((__v8hi) __A); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtph_ps (__m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256 ((__v8hi) __A); +} + +#ifdef __OPTIMIZE__ +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_cvtss_sh (float __F, const int __I) +{ + __v4sf __A = __extension__ (__v4sf){ __F, 0, 0, 0 }; + __v8hi __H = __builtin_ia32_vcvtps2ph (__A, __I); + return (unsigned short) __builtin_ia32_vec_ext_v8hi (__H, 0); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_ph (__m128 __A, const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph ((__v4sf) __A, 
__I); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cvtps_ph (__m256 __A, const int __I) +{ + return (__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf) __A, __I); +} +#else +#define _cvtss_sh(__F, __I) \ + (__extension__ \ + ({ \ + __v4sf __A = __extension__ (__v4sf){ __F, 0, 0, 0 }; \ + __v8hi __H = __builtin_ia32_vcvtps2ph (__A, __I); \ + (unsigned short) __builtin_ia32_vec_ext_v8hi (__H, 0); \ + })) + +#define _mm_cvtps_ph(A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) (A), (int) (I))) + +#define _mm256_cvtps_ph(A, I) \ + ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) (A), (int) (I))) +#endif /* __OPTIMIZE */ + +#ifdef __DISABLE_F16C__ +#undef __DISABLE_F16C__ +#pragma GCC pop_options +#endif /* __DISABLE_F16C__ */ + +#endif /* _F16CINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/float.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/float.h new file mode 100644 index 0000000..9d368c4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/float.h @@ -0,0 +1,626 @@ +/* Copyright (C) 2002-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. 
+ +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 5.2.4.2.2 Characteristics of floating types + */ + +#ifndef _FLOAT_H___ +#define _FLOAT_H___ + +/* Radix of exponent representation, b. */ +#undef FLT_RADIX +#define FLT_RADIX __FLT_RADIX__ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef FLT_MANT_DIG +#undef DBL_MANT_DIG +#undef LDBL_MANT_DIG +#define FLT_MANT_DIG __FLT_MANT_DIG__ +#define DBL_MANT_DIG __DBL_MANT_DIG__ +#define LDBL_MANT_DIG __LDBL_MANT_DIG__ + +/* Number of decimal digits, q, such that any floating-point number with q + decimal digits can be rounded into a floating-point number with p radix b + digits and back again without change to the q decimal digits, + + p * log10(b) if b is a power of 10 + floor((p - 1) * log10(b)) otherwise +*/ +#undef FLT_DIG +#undef DBL_DIG +#undef LDBL_DIG +#define FLT_DIG __FLT_DIG__ +#define DBL_DIG __DBL_DIG__ +#define LDBL_DIG __LDBL_DIG__ + +/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */ +#undef FLT_MIN_EXP +#undef DBL_MIN_EXP +#undef LDBL_MIN_EXP +#define FLT_MIN_EXP __FLT_MIN_EXP__ +#define DBL_MIN_EXP __DBL_MIN_EXP__ +#define LDBL_MIN_EXP __LDBL_MIN_EXP__ + +/* Minimum negative integer such that 10 raised to that power is in the + range of normalized floating-point numbers, + + ceil(log10(b) * (emin - 1)) +*/ +#undef FLT_MIN_10_EXP +#undef DBL_MIN_10_EXP +#undef LDBL_MIN_10_EXP +#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__ +#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__ +#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__ + +/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. 
*/ +#undef FLT_MAX_EXP +#undef DBL_MAX_EXP +#undef LDBL_MAX_EXP +#define FLT_MAX_EXP __FLT_MAX_EXP__ +#define DBL_MAX_EXP __DBL_MAX_EXP__ +#define LDBL_MAX_EXP __LDBL_MAX_EXP__ + +/* Maximum integer such that 10 raised to that power is in the range of + representable finite floating-point numbers, + + floor(log10((1 - b**-p) * b**emax)) +*/ +#undef FLT_MAX_10_EXP +#undef DBL_MAX_10_EXP +#undef LDBL_MAX_10_EXP +#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__ +#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__ +#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__ + +/* Maximum representable finite floating-point number, + + (1 - b**-p) * b**emax +*/ +#undef FLT_MAX +#undef DBL_MAX +#undef LDBL_MAX +#define FLT_MAX __FLT_MAX__ +#define DBL_MAX __DBL_MAX__ +#define LDBL_MAX __LDBL_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type, b**1-p. */ +#undef FLT_EPSILON +#undef DBL_EPSILON +#undef LDBL_EPSILON +#define FLT_EPSILON __FLT_EPSILON__ +#define DBL_EPSILON __DBL_EPSILON__ +#define LDBL_EPSILON __LDBL_EPSILON__ + +/* Minimum normalized positive floating-point number, b**(emin - 1). */ +#undef FLT_MIN +#undef DBL_MIN +#undef LDBL_MIN +#define FLT_MIN __FLT_MIN__ +#define DBL_MIN __DBL_MIN__ +#define LDBL_MIN __LDBL_MIN__ + +/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */ +/* ??? This is supposed to change with calls to fesetround in . */ +#undef FLT_ROUNDS +#define FLT_ROUNDS 1 + +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) \ + || (defined (__cplusplus) && __cplusplus >= 201103L) +/* The floating-point expression evaluation method. The precise + definitions of these values are generalised to include support for + the interchange and extended types defined in ISO/IEC TS 18661-3. 
+ Prior to this (for C99/C11) the definitions were: + + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type float and double + to the range and precision of the double type, evaluate + long double operations and constants to the range and + precision of the long double type + 2 evaluate all operations and constants to the range and + precision of the long double type + + The TS 18661-3 definitions are: + + -1 indeterminate + 0 evaluate all operations and constants, whose semantic type has + at most the range and precision of float, to the range and + precision of float; evaluate all other operations and constants + to the range and precision of the semantic type. + 1 evaluate all operations and constants, whose semantic type has + at most the range and precision of double, to the range and + precision of double; evaluate all other operations and constants + to the range and precision of the semantic type. + 2 evaluate all operations and constants, whose semantic type has + at most the range and precision of long double, to the range and + precision of long double; evaluate all other operations and + constants to the range and precision of the semantic type. + N where _FloatN is a supported interchange floating type + evaluate all operations and constants, whose semantic type has + at most the range and precision of the _FloatN type, to the + range and precision of the _FloatN type; evaluate all other + operations and constants to the range and precision of the + semantic type. + N + 1, where _FloatNx is a supported extended floating type + evaluate operations and constants, whose semantic type has at + most the range and precision of the _FloatNx type, to the range + and precision of the _FloatNx type; evaluate all other + operations and constants to the range and precision of the + semantic type. 
+ + The compiler predefines two macros: + + __FLT_EVAL_METHOD__ + Which, depending on the value given for + -fpermitted-flt-eval-methods, may be limited to only those values + for FLT_EVAL_METHOD defined in C99/C11. + + __FLT_EVAL_METHOD_TS_18661_3__ + Which always permits the values for FLT_EVAL_METHOD defined in + ISO/IEC TS 18661-3. + + Here we want to use __FLT_EVAL_METHOD__, unless + __STDC_WANT_IEC_60559_TYPES_EXT__ is defined, in which case the user + is specifically asking for the ISO/IEC TS 18661-3 types, so we use + __FLT_EVAL_METHOD_TS_18661_3__. + + ??? This ought to change with the setting of the fp control word; + the value provided by the compiler assumes the widest setting. */ +#undef FLT_EVAL_METHOD +#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__ +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD_TS_18661_3__ +#else +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ +#endif + +/* Number of decimal digits, n, such that any floating-point number in the + widest supported floating type with pmax radix b digits can be rounded + to a floating-point number with n decimal digits and back again without + change to the value, + + pmax * log10(b) if b is a power of 10 + ceil(1 + pmax * log10(b)) otherwise +*/ +#undef DECIMAL_DIG +#define DECIMAL_DIG __DECIMAL_DIG__ + +#endif /* C99 */ + +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \ + || (defined (__cplusplus) && __cplusplus >= 201703L) +/* Versions of DECIMAL_DIG for each floating-point type. */ +#undef FLT_DECIMAL_DIG +#undef DBL_DECIMAL_DIG +#undef LDBL_DECIMAL_DIG +#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__ +#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__ +#define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__ + +/* Whether types support subnormal numbers. */ +#undef FLT_HAS_SUBNORM +#undef DBL_HAS_SUBNORM +#undef LDBL_HAS_SUBNORM +#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__ +#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__ +#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__ + +/* Minimum positive values, including subnormals. 
*/ +#undef FLT_TRUE_MIN +#undef DBL_TRUE_MIN +#undef LDBL_TRUE_MIN +#define FLT_TRUE_MIN __FLT_DENORM_MIN__ +#define DBL_TRUE_MIN __DBL_DENORM_MIN__ +#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__ + +#endif /* C11 */ + +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +/* Maximum finite positive value with MANT_DIG digits in the + significand taking their maximum value. */ +#undef FLT_NORM_MAX +#undef DBL_NORM_MAX +#undef LDBL_NORM_MAX +#define FLT_NORM_MAX __FLT_NORM_MAX__ +#define DBL_NORM_MAX __DBL_NORM_MAX__ +#define LDBL_NORM_MAX __LDBL_NORM_MAX__ + +/* Whether each type matches an IEC 60559 format (1 for format, 2 for + format and operations). */ +#undef FLT_IS_IEC_60559 +#undef DBL_IS_IEC_60559 +#undef LDBL_IS_IEC_60559 +#define FLT_IS_IEC_60559 __FLT_IS_IEC_60559__ +#define DBL_IS_IEC_60559 __DBL_IS_IEC_60559__ +#define LDBL_IS_IEC_60559 __LDBL_IS_IEC_60559__ + +/* Infinity in type float, or overflow if infinity not supported. */ +#undef INFINITY +#define INFINITY (__builtin_inff ()) + +/* Quiet NaN, if supported for float. */ +#if __FLT_HAS_QUIET_NAN__ +#undef NAN +#define NAN (__builtin_nanf ("")) +#endif + +/* Signaling NaN, if supported for each type. All formats supported + by GCC support either both quiet and signaling NaNs, or neither + kind of NaN. */ +#if __FLT_HAS_QUIET_NAN__ +#undef FLT_SNAN +#define FLT_SNAN (__builtin_nansf ("")) +#endif +#if __DBL_HAS_QUIET_NAN__ +#undef DBL_SNAN +#define DBL_SNAN (__builtin_nans ("")) +#endif +#if __LDBL_HAS_QUIET_NAN__ +#undef LDBL_SNAN +#define LDBL_SNAN (__builtin_nansl ("")) +#endif + +#endif /* C2X */ + +#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \ + || defined __STDC_WANT_IEC_60559_EXT__) +/* Number of decimal digits for which conversions between decimal + character strings and binary formats, in both directions, are + correctly rounded. */ +#define CR_DECIMAL_DIG __UINTMAX_MAX__ +#endif + +#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__ +/* Constants for _FloatN and _FloatNx types from TS 18661-3. 
See + comments above for their semantics. */ + +#ifdef __FLT16_MANT_DIG__ +#undef FLT16_MANT_DIG +#define FLT16_MANT_DIG __FLT16_MANT_DIG__ +#undef FLT16_DIG +#define FLT16_DIG __FLT16_DIG__ +#undef FLT16_MIN_EXP +#define FLT16_MIN_EXP __FLT16_MIN_EXP__ +#undef FLT16_MIN_10_EXP +#define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__ +#undef FLT16_MAX_EXP +#define FLT16_MAX_EXP __FLT16_MAX_EXP__ +#undef FLT16_MAX_10_EXP +#define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__ +#undef FLT16_MAX +#define FLT16_MAX __FLT16_MAX__ +#undef FLT16_EPSILON +#define FLT16_EPSILON __FLT16_EPSILON__ +#undef FLT16_MIN +#define FLT16_MIN __FLT16_MIN__ +#undef FLT16_DECIMAL_DIG +#define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__ +#undef FLT16_TRUE_MIN +#define FLT16_TRUE_MIN __FLT16_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT16_SNAN +#define FLT16_SNAN (__builtin_nansf16 ("")) +#endif /* C2X */ +#endif /* __FLT16_MANT_DIG__. */ + +#ifdef __FLT32_MANT_DIG__ +#undef FLT32_MANT_DIG +#define FLT32_MANT_DIG __FLT32_MANT_DIG__ +#undef FLT32_DIG +#define FLT32_DIG __FLT32_DIG__ +#undef FLT32_MIN_EXP +#define FLT32_MIN_EXP __FLT32_MIN_EXP__ +#undef FLT32_MIN_10_EXP +#define FLT32_MIN_10_EXP __FLT32_MIN_10_EXP__ +#undef FLT32_MAX_EXP +#define FLT32_MAX_EXP __FLT32_MAX_EXP__ +#undef FLT32_MAX_10_EXP +#define FLT32_MAX_10_EXP __FLT32_MAX_10_EXP__ +#undef FLT32_MAX +#define FLT32_MAX __FLT32_MAX__ +#undef FLT32_EPSILON +#define FLT32_EPSILON __FLT32_EPSILON__ +#undef FLT32_MIN +#define FLT32_MIN __FLT32_MIN__ +#undef FLT32_DECIMAL_DIG +#define FLT32_DECIMAL_DIG __FLT32_DECIMAL_DIG__ +#undef FLT32_TRUE_MIN +#define FLT32_TRUE_MIN __FLT32_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT32_SNAN +#define FLT32_SNAN (__builtin_nansf32 ("")) +#endif /* C2X */ +#endif /* __FLT32_MANT_DIG__. 
*/ + +#ifdef __FLT64_MANT_DIG__ +#undef FLT64_MANT_DIG +#define FLT64_MANT_DIG __FLT64_MANT_DIG__ +#undef FLT64_DIG +#define FLT64_DIG __FLT64_DIG__ +#undef FLT64_MIN_EXP +#define FLT64_MIN_EXP __FLT64_MIN_EXP__ +#undef FLT64_MIN_10_EXP +#define FLT64_MIN_10_EXP __FLT64_MIN_10_EXP__ +#undef FLT64_MAX_EXP +#define FLT64_MAX_EXP __FLT64_MAX_EXP__ +#undef FLT64_MAX_10_EXP +#define FLT64_MAX_10_EXP __FLT64_MAX_10_EXP__ +#undef FLT64_MAX +#define FLT64_MAX __FLT64_MAX__ +#undef FLT64_EPSILON +#define FLT64_EPSILON __FLT64_EPSILON__ +#undef FLT64_MIN +#define FLT64_MIN __FLT64_MIN__ +#undef FLT64_DECIMAL_DIG +#define FLT64_DECIMAL_DIG __FLT64_DECIMAL_DIG__ +#undef FLT64_TRUE_MIN +#define FLT64_TRUE_MIN __FLT64_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT64_SNAN +#define FLT64_SNAN (__builtin_nansf64 ("")) +#endif /* C2X */ +#endif /* __FLT64_MANT_DIG__. */ + +#ifdef __FLT128_MANT_DIG__ +#undef FLT128_MANT_DIG +#define FLT128_MANT_DIG __FLT128_MANT_DIG__ +#undef FLT128_DIG +#define FLT128_DIG __FLT128_DIG__ +#undef FLT128_MIN_EXP +#define FLT128_MIN_EXP __FLT128_MIN_EXP__ +#undef FLT128_MIN_10_EXP +#define FLT128_MIN_10_EXP __FLT128_MIN_10_EXP__ +#undef FLT128_MAX_EXP +#define FLT128_MAX_EXP __FLT128_MAX_EXP__ +#undef FLT128_MAX_10_EXP +#define FLT128_MAX_10_EXP __FLT128_MAX_10_EXP__ +#undef FLT128_MAX +#define FLT128_MAX __FLT128_MAX__ +#undef FLT128_EPSILON +#define FLT128_EPSILON __FLT128_EPSILON__ +#undef FLT128_MIN +#define FLT128_MIN __FLT128_MIN__ +#undef FLT128_DECIMAL_DIG +#define FLT128_DECIMAL_DIG __FLT128_DECIMAL_DIG__ +#undef FLT128_TRUE_MIN +#define FLT128_TRUE_MIN __FLT128_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT128_SNAN +#define FLT128_SNAN (__builtin_nansf128 ("")) +#endif /* C2X */ +#endif /* __FLT128_MANT_DIG__. 
*/ + +#ifdef __FLT32X_MANT_DIG__ +#undef FLT32X_MANT_DIG +#define FLT32X_MANT_DIG __FLT32X_MANT_DIG__ +#undef FLT32X_DIG +#define FLT32X_DIG __FLT32X_DIG__ +#undef FLT32X_MIN_EXP +#define FLT32X_MIN_EXP __FLT32X_MIN_EXP__ +#undef FLT32X_MIN_10_EXP +#define FLT32X_MIN_10_EXP __FLT32X_MIN_10_EXP__ +#undef FLT32X_MAX_EXP +#define FLT32X_MAX_EXP __FLT32X_MAX_EXP__ +#undef FLT32X_MAX_10_EXP +#define FLT32X_MAX_10_EXP __FLT32X_MAX_10_EXP__ +#undef FLT32X_MAX +#define FLT32X_MAX __FLT32X_MAX__ +#undef FLT32X_EPSILON +#define FLT32X_EPSILON __FLT32X_EPSILON__ +#undef FLT32X_MIN +#define FLT32X_MIN __FLT32X_MIN__ +#undef FLT32X_DECIMAL_DIG +#define FLT32X_DECIMAL_DIG __FLT32X_DECIMAL_DIG__ +#undef FLT32X_TRUE_MIN +#define FLT32X_TRUE_MIN __FLT32X_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT32X_SNAN +#define FLT32X_SNAN (__builtin_nansf32x ("")) +#endif /* C2X */ +#endif /* __FLT32X_MANT_DIG__. */ + +#ifdef __FLT64X_MANT_DIG__ +#undef FLT64X_MANT_DIG +#define FLT64X_MANT_DIG __FLT64X_MANT_DIG__ +#undef FLT64X_DIG +#define FLT64X_DIG __FLT64X_DIG__ +#undef FLT64X_MIN_EXP +#define FLT64X_MIN_EXP __FLT64X_MIN_EXP__ +#undef FLT64X_MIN_10_EXP +#define FLT64X_MIN_10_EXP __FLT64X_MIN_10_EXP__ +#undef FLT64X_MAX_EXP +#define FLT64X_MAX_EXP __FLT64X_MAX_EXP__ +#undef FLT64X_MAX_10_EXP +#define FLT64X_MAX_10_EXP __FLT64X_MAX_10_EXP__ +#undef FLT64X_MAX +#define FLT64X_MAX __FLT64X_MAX__ +#undef FLT64X_EPSILON +#define FLT64X_EPSILON __FLT64X_EPSILON__ +#undef FLT64X_MIN +#define FLT64X_MIN __FLT64X_MIN__ +#undef FLT64X_DECIMAL_DIG +#define FLT64X_DECIMAL_DIG __FLT64X_DECIMAL_DIG__ +#undef FLT64X_TRUE_MIN +#define FLT64X_TRUE_MIN __FLT64X_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT64X_SNAN +#define FLT64X_SNAN (__builtin_nansf64x ("")) +#endif /* C2X */ +#endif /* __FLT64X_MANT_DIG__. 
*/ + +#ifdef __FLT128X_MANT_DIG__ +#undef FLT128X_MANT_DIG +#define FLT128X_MANT_DIG __FLT128X_MANT_DIG__ +#undef FLT128X_DIG +#define FLT128X_DIG __FLT128X_DIG__ +#undef FLT128X_MIN_EXP +#define FLT128X_MIN_EXP __FLT128X_MIN_EXP__ +#undef FLT128X_MIN_10_EXP +#define FLT128X_MIN_10_EXP __FLT128X_MIN_10_EXP__ +#undef FLT128X_MAX_EXP +#define FLT128X_MAX_EXP __FLT128X_MAX_EXP__ +#undef FLT128X_MAX_10_EXP +#define FLT128X_MAX_10_EXP __FLT128X_MAX_10_EXP__ +#undef FLT128X_MAX +#define FLT128X_MAX __FLT128X_MAX__ +#undef FLT128X_EPSILON +#define FLT128X_EPSILON __FLT128X_EPSILON__ +#undef FLT128X_MIN +#define FLT128X_MIN __FLT128X_MIN__ +#undef FLT128X_DECIMAL_DIG +#define FLT128X_DECIMAL_DIG __FLT128X_DECIMAL_DIG__ +#undef FLT128X_TRUE_MIN +#define FLT128X_TRUE_MIN __FLT128X_DENORM_MIN__ +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#undef FLT128X_SNAN +#define FLT128X_SNAN (__builtin_nansf128x ("")) +#endif /* C2X */ +#endif /* __FLT128X_MANT_DIG__. */ + +#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__. */ + +#ifdef __DEC32_MANT_DIG__ +#if (defined __STDC_WANT_DEC_FP__ \ + || defined __STDC_WANT_IEC_60559_DFP_EXT__ \ + || (defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L)) +/* C2X; formerly Technical Report 24732, extension for decimal + floating-point arithmetic: Characteristic of decimal floating types + , and TS 18661-2. */ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef DEC32_MANT_DIG +#undef DEC64_MANT_DIG +#undef DEC128_MANT_DIG +#define DEC32_MANT_DIG __DEC32_MANT_DIG__ +#define DEC64_MANT_DIG __DEC64_MANT_DIG__ +#define DEC128_MANT_DIG __DEC128_MANT_DIG__ + +/* Minimum exponent. */ +#undef DEC32_MIN_EXP +#undef DEC64_MIN_EXP +#undef DEC128_MIN_EXP +#define DEC32_MIN_EXP __DEC32_MIN_EXP__ +#define DEC64_MIN_EXP __DEC64_MIN_EXP__ +#define DEC128_MIN_EXP __DEC128_MIN_EXP__ + +/* Maximum exponent. 
*/ +#undef DEC32_MAX_EXP +#undef DEC64_MAX_EXP +#undef DEC128_MAX_EXP +#define DEC32_MAX_EXP __DEC32_MAX_EXP__ +#define DEC64_MAX_EXP __DEC64_MAX_EXP__ +#define DEC128_MAX_EXP __DEC128_MAX_EXP__ + +/* Maximum representable finite decimal floating-point number + (there are 6, 15, and 33 9s after the decimal points respectively). */ +#undef DEC32_MAX +#undef DEC64_MAX +#undef DEC128_MAX +#define DEC32_MAX __DEC32_MAX__ +#define DEC64_MAX __DEC64_MAX__ +#define DEC128_MAX __DEC128_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type. */ +#undef DEC32_EPSILON +#undef DEC64_EPSILON +#undef DEC128_EPSILON +#define DEC32_EPSILON __DEC32_EPSILON__ +#define DEC64_EPSILON __DEC64_EPSILON__ +#define DEC128_EPSILON __DEC128_EPSILON__ + +/* Minimum normalized positive floating-point number. */ +#undef DEC32_MIN +#undef DEC64_MIN +#undef DEC128_MIN +#define DEC32_MIN __DEC32_MIN__ +#define DEC64_MIN __DEC64_MIN__ +#define DEC128_MIN __DEC128_MIN__ + +/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type _Decimal32 + and _Decimal64 to the range and precision of the _Decimal64 + type, evaluate _Decimal128 operations and constants to the + range and precision of the _Decimal128 type; + 2 evaluate all operations and constants to the range and + precision of the _Decimal128 type. */ + +#undef DEC_EVAL_METHOD +#define DEC_EVAL_METHOD __DEC_EVAL_METHOD__ + +#endif /* __STDC_WANT_DEC_FP__ || __STDC_WANT_IEC_60559_DFP_EXT__ || C2X. */ + +#ifdef __STDC_WANT_DEC_FP__ + +/* Minimum subnormal positive floating-point number. 
*/ +#undef DEC32_SUBNORMAL_MIN +#undef DEC64_SUBNORMAL_MIN +#undef DEC128_SUBNORMAL_MIN +#define DEC32_SUBNORMAL_MIN __DEC32_SUBNORMAL_MIN__ +#define DEC64_SUBNORMAL_MIN __DEC64_SUBNORMAL_MIN__ +#define DEC128_SUBNORMAL_MIN __DEC128_SUBNORMAL_MIN__ + +#endif /* __STDC_WANT_DEC_FP__. */ + +#if (defined __STDC_WANT_IEC_60559_DFP_EXT__ \ + || (defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L)) + +/* Minimum subnormal positive floating-point number. */ +#undef DEC32_TRUE_MIN +#undef DEC64_TRUE_MIN +#undef DEC128_TRUE_MIN +#define DEC32_TRUE_MIN __DEC32_SUBNORMAL_MIN__ +#define DEC64_TRUE_MIN __DEC64_SUBNORMAL_MIN__ +#define DEC128_TRUE_MIN __DEC128_SUBNORMAL_MIN__ + +#endif /* __STDC_WANT_IEC_60559_DFP_EXT__ || C2X. */ + +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L + +/* Infinity in type _Decimal32. */ +#undef DEC_INFINITY +#define DEC_INFINITY (__builtin_infd32 ()) + +/* Quiet NaN in type _Decimal32. */ +#undef DEC_NAN +#define DEC_NAN (__builtin_nand32 ("")) + +/* Signaling NaN in each decimal floating-point type. */ +#undef DEC32_SNAN +#define DEC32_SNAN (__builtin_nansd32 ("")) +#undef DEC64_SNAN +#define DEC64_SNAN (__builtin_nansd64 ("")) +#undef DEC128_SNAN +#define DEC128_SNAN (__builtin_nansd128 ("")) + +#endif /* C2X */ + +#endif /* __DEC32_MANT_DIG__ */ + +#endif /* _FLOAT_H___ */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/fma4intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/fma4intrin.h new file mode 100644 index 0000000..c1792bf --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/fma4intrin.h @@ -0,0 +1,241 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _FMA4INTRIN_H_INCLUDED +#define _FMA4INTRIN_H_INCLUDED + +/* We need definitions from the SSE4A, SSE3, SSE2 and SSE header files. */ +#include + +#ifndef __FMA4__ +#pragma GCC push_options +#pragma GCC target("fma4") +#define __DISABLE_FMA4__ +#endif /* __FMA4__ */ + +/* 128b Floating point multiply/add type instructions. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd ((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_ps (__m128 __A, __m128 __B, __m128 __C) + +{ + return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss ((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msub_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd ((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_ps (__m128 
__A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps (-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd (-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss (-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmacc_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd (-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddps (-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddpd (-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss (-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_nmsub_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd (-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps ((__v4sf)__A, 
(__v4sf)__B, (__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msubadd_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_msubadd_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/* 256b Floating point multiply/add type instructions. */ +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_macc_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_macc_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msub_ps (__m256 __A, __m256 __B, __m256 __C) + +{ + return (__m256) __builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmacc_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256 (-(__v8sf)__A, 
(__v8sf)__B, (__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmacc_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 (-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddps256 (-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_nmsub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddpd256 (-(__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maddsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maddsub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msubadd_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_vfmaddsubps256 ((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_msubadd_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_vfmaddsubpd256 ((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +#ifdef __DISABLE_FMA4__ +#undef __DISABLE_FMA4__ +#pragma GCC pop_options +#endif /* __DISABLE_FMA4__ */ + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/fmaintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/fmaintrin.h new file 
mode 100644 index 0000000..e69e887 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/fmaintrin.h @@ -0,0 +1,302 @@ +/* Copyright (C) 2011-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _FMAINTRIN_H_INCLUDED +#define _FMAINTRIN_H_INCLUDED + +#ifndef __FMA__ +#pragma GCC push_options +#pragma GCC target("fma") +#define __DISABLE_FMA__ +#endif /* __FMA__ */ + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m256d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmadd_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, + (__v4df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmadd_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_vfmaddsd3 ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmadd_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3 ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmsubpd ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m256d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmsub_pd (__m256d __A, 
__m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmsubpd256 ((__v4df)__A, (__v4df)__B, + (__v4df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmsubps ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmsubps256 ((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmsubsd3 ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsub_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmsubss3 ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmaddpd ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m256d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fnmadd_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfnmaddpd256 ((__v4df)__A, (__v4df)__B, + (__v4df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmaddps ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fnmadd_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfnmaddps256 
((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmaddsd3 ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmadd_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmaddss3 ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmsubpd ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m256d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fnmsub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfnmsubpd256 ((__v4df)__A, (__v4df)__B, + (__v4df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmsubps ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fnmsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfnmsubps256 ((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_sd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmsubsd3 ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fnmsub_ss (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmsubss3 ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline 
__m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmaddsub_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, + (__v2df)__C); +} + +extern __inline __m256d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmaddsub_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256 ((__v4df)__A, + (__v4df)__B, + (__v4df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmaddsub_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmaddsub_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256 ((__v8sf)__A, + (__v8sf)__B, + (__v8sf)__C); +} + +extern __inline __m128d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsubadd_pd (__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, + -(__v2df)__C); +} + +extern __inline __m256d +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmsubadd_pd (__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256 ((__v4df)__A, + (__v4df)__B, + -(__v4df)__C); +} + +extern __inline __m128 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_fmsubadd_ps (__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, + -(__v4sf)__C); +} + +extern __inline __m256 +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_fmsubadd_ps (__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256 ((__v8sf)__A, + (__v8sf)__B, + -(__v8sf)__C); +} + +#ifdef __DISABLE_FMA__ +#undef 
__DISABLE_FMA__ +#pragma GCC pop_options +#endif /* __DISABLE_FMA__ */ + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/fxsrintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/fxsrintrin.h new file mode 100644 index 0000000..a0b6a18 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/fxsrintrin.h @@ -0,0 +1,73 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _FXSRINTRIN_H_INCLUDED +#define _FXSRINTRIN_H_INCLUDED + +#ifndef __FXSR__ +#pragma GCC push_options +#pragma GCC target("fxsr") +#define __DISABLE_FXSR__ +#endif /* __FXSR__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_fxsave (void *__P) +{ + __builtin_ia32_fxsave (__P); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_fxrstor (void *__P) +{ + __builtin_ia32_fxrstor (__P); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_fxsave64 (void *__P) +{ + __builtin_ia32_fxsave64 (__P); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_fxrstor64 (void *__P) +{ + __builtin_ia32_fxrstor64 (__P); +} +#endif + +#ifdef __DISABLE_FXSR__ +#undef __DISABLE_FXSR__ +#pragma GCC pop_options +#endif /* __DISABLE_FXSR__ */ + + +#endif /* _FXSRINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/gcov.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/gcov.h new file mode 100644 index 0000000..cea9302 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/gcov.h @@ -0,0 +1,55 @@ +/* GCOV interface routines. + Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef GCC_GCOV_H +#define GCC_GCOV_H + +struct gcov_info; + +/* Set all counters to zero. */ + +extern void __gcov_reset (void); + +/* Write profile information to a file. */ + +extern void __gcov_dump (void); + +/* Convert the gcov information referenced by INFO to a gcda data stream. + The FILENAME_FN callback is called exactly once with the filename associated + with the gcov information. The filename may be NULL. Afterwards, the + DUMP_FN callback is subsequently called with chunks (the begin and length of + the chunk are passed as the first two callback parameters) of the gcda data + stream. The ALLOCATE_FN callback shall allocate memory with a size in + characters specified by the first callback parameter. The ARG parameter is + a user-provided argument passed as the last argument to the callback + functions. */ + +extern void +__gcov_info_to_gcda (const struct gcov_info *__info, + void (*__filename_fn) (const char *, void *), + void (*__dump_fn) (const void *, unsigned, void *), + void *(*__allocate_fn) (unsigned, void *), + void *__arg); + +#endif /* GCC_GCOV_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/gfniintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/gfniintrin.h new file mode 100644 index 0000000..17ae8d8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/gfniintrin.h @@ -0,0 +1,414 @@ +/* Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _GFNIINTRIN_H_INCLUDED +#define _GFNIINTRIN_H_INCLUDED + +#if !defined(__GFNI__) || !defined(__SSE2__) +#pragma GCC push_options +#pragma GCC target("gfni,sse2") +#define __DISABLE_GFNI__ +#endif /* __GFNI__ */ + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_gf2p8mul_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A, + (__v16qi) __B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_gf2p8affineinv_epi64_epi8 (__m128i __A, __m128i __B, const int __C) +{ + return (__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi ((__v16qi) __A, + (__v16qi) __B, + __C); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_gf2p8affine_epi64_epi8 (__m128i __A, __m128i __B, const int __C) +{ + return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi ((__v16qi) __A, + (__v16qi) __B, __C); +} +#else +#define _mm_gf2p8affineinv_epi64_epi8(A, B, C) \ + ((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(C))) +#define _mm_gf2p8affine_epi64_epi8(A, B, C) \ + ((__m128i) 
__builtin_ia32_vgf2p8affineqb_v16qi ((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(C))) +#endif + +#ifdef __DISABLE_GFNI__ +#undef __DISABLE_GFNI__ +#pragma GCC pop_options +#endif /* __DISABLE_GFNI__ */ + +#if !defined(__GFNI__) || !defined(__AVX__) +#pragma GCC push_options +#pragma GCC target("gfni,avx") +#define __DISABLE_GFNIAVX__ +#endif /* __GFNIAVX__ */ + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_gf2p8mul_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi ((__v32qi) __A, + (__v32qi) __B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_gf2p8affineinv_epi64_epi8 (__m256i __A, __m256i __B, const int __C) +{ + return (__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi ((__v32qi) __A, + (__v32qi) __B, + __C); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_gf2p8affine_epi64_epi8 (__m256i __A, __m256i __B, const int __C) +{ + return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi ((__v32qi) __A, + (__v32qi) __B, __C); +} +#else +#define _mm256_gf2p8affineinv_epi64_epi8(A, B, C) \ + ((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), \ + (int)(C))) +#define _mm256_gf2p8affine_epi64_epi8(A, B, C) \ + ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi ((__v32qi)(__m256i)(A), \ + ( __v32qi)(__m256i)(B), (int)(C))) +#endif + +#ifdef __DISABLE_GFNIAVX__ +#undef __DISABLE_GFNIAVX__ +#pragma GCC pop_options +#endif /* __GFNIAVX__ */ + +#if !defined(__GFNI__) || !defined(__AVX512VL__) +#pragma GCC push_options +#pragma GCC target("gfni,avx512vl") +#define __DISABLE_GFNIAVX512VL__ +#endif /* __GFNIAVX512VL__ */ + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_gf2p8mul_epi8 (__m128i __A, __mmask16 __B, __m128i __C, __m128i __D) +{ + return 
(__m128i) __builtin_ia32_vgf2p8mulb_v16qi_mask ((__v16qi) __C, + (__v16qi) __D, + (__v16qi)__A, __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_gf2p8mul_epi8 (__mmask16 __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi_mask ((__v16qi) __B, + (__v16qi) __C, (__v16qi) _mm_setzero_si128 (), __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_gf2p8affineinv_epi64_epi8 (__m128i __A, __mmask16 __B, __m128i __C, + __m128i __D, const int __E) +{ + return (__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask ((__v16qi) __C, + (__v16qi) __D, + __E, + (__v16qi)__A, + __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_gf2p8affineinv_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C, + const int __D) +{ + return (__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask ((__v16qi) __B, + (__v16qi) __C, __D, + (__v16qi) _mm_setzero_si128 (), + __A); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mask_gf2p8affine_epi64_epi8 (__m128i __A, __mmask16 __B, __m128i __C, + __m128i __D, const int __E) +{ + return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask ((__v16qi) __C, + (__v16qi) __D, __E, (__v16qi)__A, __B); +} + +extern __inline __m128i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskz_gf2p8affine_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C, + const int __D) +{ + return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask ((__v16qi) __B, + (__v16qi) __C, __D, (__v16qi) _mm_setzero_si128 (), __A); +} +#else +#define _mm_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask( \ + (__v16qi)(__m128i)(C), (__v16qi)(__m128i)(D), \ + (int)(E), (__v16qi)(__m128i)(A), (__mmask16)(B))) +#define 
_mm_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) \ + ((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask( \ + (__v16qi)(__m128i)(B), (__v16qi)(__m128i)(C), \ + (int)(D), (__v16qi)(__m128i) _mm_setzero_si128 (), \ + (__mmask16)(A))) +#define _mm_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \ + ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask((__v16qi)(__m128i)(C),\ + (__v16qi)(__m128i)(D), (int)(E), (__v16qi)(__m128i)(A), (__mmask16)(B))) +#define _mm_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \ + ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask((__v16qi)(__m128i)(B),\ + (__v16qi)(__m128i)(C), (int)(D), \ + (__v16qi)(__m128i) _mm_setzero_si128 (), (__mmask16)(A))) +#endif + +#ifdef __DISABLE_GFNIAVX512VL__ +#undef __DISABLE_GFNIAVX512VL__ +#pragma GCC pop_options +#endif /* __GFNIAVX512VL__ */ + +#if !defined(__GFNI__) || !defined(__AVX512VL__) || !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("gfni,avx512vl,avx512bw") +#define __DISABLE_GFNIAVX512VLBW__ +#endif /* __GFNIAVX512VLBW__ */ + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_gf2p8mul_epi8 (__m256i __A, __mmask32 __B, __m256i __C, + __m256i __D) +{ + return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi_mask ((__v32qi) __C, + (__v32qi) __D, + (__v32qi)__A, __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_gf2p8mul_epi8 (__mmask32 __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi_mask ((__v32qi) __B, + (__v32qi) __C, (__v32qi) _mm256_setzero_si256 (), __A); +} + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_gf2p8affineinv_epi64_epi8 (__m256i __A, __mmask32 __B, + __m256i __C, __m256i __D, const int __E) +{ + return (__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask ((__v32qi) __C, + (__v32qi) __D, + __E, + (__v32qi)__A, + __B); +} + +extern 
__inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_gf2p8affineinv_epi64_epi8 (__mmask32 __A, __m256i __B, + __m256i __C, const int __D) +{ + return (__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask ((__v32qi) __B, + (__v32qi) __C, __D, + (__v32qi) _mm256_setzero_si256 (), __A); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_mask_gf2p8affine_epi64_epi8 (__m256i __A, __mmask32 __B, __m256i __C, + __m256i __D, const int __E) +{ + return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask ((__v32qi) __C, + (__v32qi) __D, + __E, + (__v32qi)__A, + __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_maskz_gf2p8affine_epi64_epi8 (__mmask32 __A, __m256i __B, + __m256i __C, const int __D) +{ + return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask ((__v32qi) __B, + (__v32qi) __C, __D, (__v32qi)_mm256_setzero_si256 (), __A); +} +#else +#define _mm256_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask( \ + (__v32qi)(__m256i)(C), (__v32qi)(__m256i)(D), (int)(E), \ + (__v32qi)(__m256i)(A), (__mmask32)(B))) +#define _mm256_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) \ + ((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask( \ + (__v32qi)(__m256i)(B), (__v32qi)(__m256i)(C), (int)(D), \ + (__v32qi)(__m256i) _mm256_setzero_si256 (), (__mmask32)(A))) +#define _mm256_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \ + ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask((__v32qi)(__m256i)(C),\ + (__v32qi)(__m256i)(D), (int)(E), (__v32qi)(__m256i)(A), (__mmask32)(B))) +#define _mm256_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \ + ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask((__v32qi)(__m256i)(B),\ + (__v32qi)(__m256i)(C), (int)(D), \ + (__v32qi)(__m256i) _mm256_setzero_si256 (), (__mmask32)(A))) +#endif + +#ifdef __DISABLE_GFNIAVX512VLBW__ +#undef 
__DISABLE_GFNIAVX512VLBW__ +#pragma GCC pop_options +#endif /* __GFNIAVX512VLBW__ */ + +#if !defined(__GFNI__) || !defined(__AVX512F__) || !defined(__AVX512BW__) +#pragma GCC push_options +#pragma GCC target("gfni,avx512f,avx512bw") +#define __DISABLE_GFNIAVX512FBW__ +#endif /* __GFNIAVX512FBW__ */ + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_gf2p8mul_epi8 (__m512i __A, __mmask64 __B, __m512i __C, + __m512i __D) +{ + return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __C, + (__v64qi) __D, (__v64qi)__A, __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_gf2p8mul_epi8 (__mmask64 __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __B, + (__v64qi) __C, (__v64qi) _mm512_setzero_si512 (), __A); +} +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_gf2p8mul_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi ((__v64qi) __A, + (__v64qi) __B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_gf2p8affineinv_epi64_epi8 (__m512i __A, __mmask64 __B, __m512i __C, + __m512i __D, const int __E) +{ + return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask ((__v64qi) __C, + (__v64qi) __D, + __E, + (__v64qi)__A, + __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_gf2p8affineinv_epi64_epi8 (__mmask64 __A, __m512i __B, + __m512i __C, const int __D) +{ + return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask ((__v64qi) __B, + (__v64qi) __C, __D, + (__v64qi) _mm512_setzero_si512 (), __A); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_gf2p8affineinv_epi64_epi8 (__m512i __A, __m512i __B, const int 
__C) +{ + return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ((__v64qi) __A, + (__v64qi) __B, __C); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_mask_gf2p8affine_epi64_epi8 (__m512i __A, __mmask64 __B, __m512i __C, + __m512i __D, const int __E) +{ + return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __C, + (__v64qi) __D, __E, (__v64qi)__A, __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_maskz_gf2p8affine_epi64_epi8 (__mmask64 __A, __m512i __B, __m512i __C, + const int __D) +{ + return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __B, + (__v64qi) __C, __D, (__v64qi) _mm512_setzero_si512 (), __A); +} +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C) +{ + return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi) __A, + (__v64qi) __B, __C); +} +#else +#define _mm512_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \ + (__v64qi)(__m512i)(C), (__v64qi)(__m512i)(D), (int)(E), \ + (__v64qi)(__m512i)(A), (__mmask64)(B))) +#define _mm512_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) \ + ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( \ + (__v64qi)(__m512i)(B), (__v64qi)(__m512i)(C), (int)(D), \ + (__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A))) +#define _mm512_gf2p8affineinv_epi64_epi8(A, B, C) \ + ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ( \ + (__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C))) +#define _mm512_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) \ + ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(C),\ + (__v64qi)(__m512i)(D), (int)(E), (__v64qi)(__m512i)(A), (__mmask64)(B))) +#define _mm512_maskz_gf2p8affine_epi64_epi8(A, B, C, D) \ + ((__m512i) 
__builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(B),\ + (__v64qi)(__m512i)(C), (int)(D), \ + (__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A))) +#define _mm512_gf2p8affine_epi64_epi8(A, B, C) \ + ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), (int)(C))) +#endif + +#ifdef __DISABLE_GFNIAVX512FBW__ +#undef __DISABLE_GFNIAVX512FBW__ +#pragma GCC pop_options +#endif /* __GFNIAVX512FBW__ */ + +#endif /* _GFNIINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/hresetintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/hresetintrin.h new file mode 100644 index 0000000..3e4f227 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/hresetintrin.h @@ -0,0 +1,48 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _HRESETINTRIN_H_INCLUDED +#define _HRESETINTRIN_H_INCLUDED + +#ifndef __HRESET__ +#pragma GCC push_options +#pragma GCC target ("hreset") +#define __DISABLE_HRESET__ +#endif /* __HRESET__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_hreset (unsigned int __EAX) +{ + __builtin_ia32_hreset (__EAX); +} + +#ifdef __DISABLE_HRESET__ +#undef __DISABLE_HRESET__ +#pragma GCC pop_options +#endif /* __DISABLE_HRESET__ */ +#endif /* _HRESETINTRIN_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/ia32intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/ia32intrin.h new file mode 100644 index 0000000..fb437d7 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/ia32intrin.h @@ -0,0 +1,317 @@ +/* Copyright (C) 2009-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +/* 32bit bsf */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsfd (int __X) +{ + return __builtin_ctz (__X); +} + +/* 32bit bsr */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsrd (int __X) +{ + return __builtin_ia32_bsrsi (__X); +} + +/* 32bit bswap */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bswapd (int __X) +{ + return __builtin_bswap32 (__X); +} + +#ifndef __iamcu__ + +#ifndef __CRC32__ +#pragma GCC push_options +#pragma GCC target("crc32") +#define __DISABLE_CRC32__ +#endif /* __CRC32__ */ + +/* 32bit accumulate CRC32 (polynomial 0x11EDC6F41) value. */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32b (unsigned int __C, unsigned char __V) +{ + return __builtin_ia32_crc32qi (__C, __V); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32w (unsigned int __C, unsigned short __V) +{ + return __builtin_ia32_crc32hi (__C, __V); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32d (unsigned int __C, unsigned int __V) +{ + return __builtin_ia32_crc32si (__C, __V); +} + +#ifdef __DISABLE_CRC32__ +#undef __DISABLE_CRC32__ +#pragma GCC pop_options +#endif /* __DISABLE_CRC32__ */ + +#endif /* __iamcu__ */ + +/* 32bit popcnt */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__popcntd (unsigned int __X) +{ + return __builtin_popcount (__X); +} + +#ifndef __iamcu__ + +/* rdpmc */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rdpmc (int __S) +{ + return __builtin_ia32_rdpmc (__S); +} + +#endif /* __iamcu__ */ + +/* rdtsc */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rdtsc (void) +{ + 
return __builtin_ia32_rdtsc (); +} + +#ifndef __iamcu__ + +/* rdtscp */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rdtscp (unsigned int *__A) +{ + return __builtin_ia32_rdtscp (__A); +} + +#endif /* __iamcu__ */ + +/* 8bit rol */ +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rolb (unsigned char __X, int __C) +{ + return __builtin_ia32_rolqi (__X, __C); +} + +/* 16bit rol */ +extern __inline unsigned short +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rolw (unsigned short __X, int __C) +{ + return __builtin_ia32_rolhi (__X, __C); +} + +/* 32bit rol */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rold (unsigned int __X, int __C) +{ + __C &= 31; + return (__X << __C) | (__X >> (-__C & 31)); +} + +/* 8bit ror */ +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rorb (unsigned char __X, int __C) +{ + return __builtin_ia32_rorqi (__X, __C); +} + +/* 16bit ror */ +extern __inline unsigned short +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rorw (unsigned short __X, int __C) +{ + return __builtin_ia32_rorhi (__X, __C); +} + +/* 32bit ror */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rord (unsigned int __X, int __C) +{ + __C &= 31; + return (__X >> __C) | (__X << (-__C & 31)); +} + +/* Pause */ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__pause (void) +{ + __builtin_ia32_pause (); +} + +#ifdef __x86_64__ +/* 64bit bsf */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsfq (long long __X) +{ + return __builtin_ctzll (__X); +} + +/* 64bit bsr */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bsrq (long long 
__X) +{ + return __builtin_ia32_bsrdi (__X); +} + +/* 64bit bswap */ +extern __inline long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bswapq (long long __X) +{ + return __builtin_bswap64 (__X); +} + +#ifndef __CRC32__ +#pragma GCC push_options +#pragma GCC target("crc32") +#define __DISABLE_CRC32__ +#endif /* __CRC32__ */ + +/* 64bit accumulate CRC32 (polynomial 0x11EDC6F41) value. */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__crc32q (unsigned long long __C, unsigned long long __V) +{ + return __builtin_ia32_crc32di (__C, __V); +} + +#ifdef __DISABLE_CRC32__ +#undef __DISABLE_CRC32__ +#pragma GCC pop_options +#endif /* __DISABLE_CRC32__ */ + +/* 64bit popcnt */ +extern __inline long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__popcntq (unsigned long long __X) +{ + return __builtin_popcountll (__X); +} + +/* 64bit rol */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rolq (unsigned long long __X, int __C) +{ + __C &= 63; + return (__X << __C) | (__X >> (-__C & 63)); +} + +/* 64bit ror */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__rorq (unsigned long long __X, int __C) +{ + __C &= 63; + return (__X >> __C) | (__X << (-__C & 63)); +} + +/* Read flags register */ +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__readeflags (void) +{ + return __builtin_ia32_readeflags_u64 (); +} + +/* Write flags register */ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__writeeflags (unsigned long long __X) +{ + __builtin_ia32_writeeflags_u64 (__X); +} + +#define _bswap64(a) __bswapq(a) +#define _popcnt64(a) __popcntq(a) +#else + +/* Read flags register */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, 
__artificial__)) +__readeflags (void) +{ + return __builtin_ia32_readeflags_u32 (); +} + +/* Write flags register */ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__writeeflags (unsigned int __X) +{ + __builtin_ia32_writeeflags_u32 (__X); +} + +#endif + +/* On LP64 systems, longs are 64-bit. Use the appropriate rotate + * function. */ +#ifdef __LP64__ +#define _lrotl(a,b) __rolq((a), (b)) +#define _lrotr(a,b) __rorq((a), (b)) +#else +#define _lrotl(a,b) __rold((a), (b)) +#define _lrotr(a,b) __rord((a), (b)) +#endif + +#define _bit_scan_forward(a) __bsfd(a) +#define _bit_scan_reverse(a) __bsrd(a) +#define _bswap(a) __bswapd(a) +#define _popcnt32(a) __popcntd(a) +#ifndef __iamcu__ +#define _rdpmc(a) __rdpmc(a) +#define _rdtscp(a) __rdtscp(a) +#endif /* __iamcu__ */ +#define _rdtsc() __rdtsc() +#define _rotwl(a,b) __rolw((a), (b)) +#define _rotwr(a,b) __rorw((a), (b)) +#define _rotl(a,b) __rold((a), (b)) +#define _rotr(a,b) __rord((a), (b)) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/immintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/immintrin.h new file mode 100644 index 0000000..6afd78c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/immintrin.h @@ -0,0 +1,131 @@ +/* Copyright (C) 2008-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#define _IMMINTRIN_H_INCLUDED + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#ifdef __SSE2__ +#include + +#include +#endif + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#endif /* _IMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/iso646.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/iso646.h new file mode 100644 index 0000000..9a46267 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/iso646.h @@ -0,0 +1,45 @@ +/* Copyright (C) 1997-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. 
+ +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.9 Alternative spellings + */ + +#ifndef _ISO646_H +#define _ISO646_H + +#ifndef __cplusplus +#define and && +#define and_eq &= +#define bitand & +#define bitor | +#define compl ~ +#define not ! +#define not_eq != +#define or || +#define or_eq |= +#define xor ^ +#define xor_eq ^= +#endif + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/keylockerintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/keylockerintrin.h new file mode 100644 index 0000000..780ab0f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/keylockerintrin.h @@ -0,0 +1,129 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if !defined _IMMINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _KEYLOCKERINTRIN_H_INCLUDED +#define _KEYLOCKERINTRIN_H_INCLUDED + +#ifndef __KL__ +#pragma GCC push_options +#pragma GCC target("kl") +#define __DISABLE_KL__ +#endif /* __KL__ */ + + +extern __inline +void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadiwkey (unsigned int __I, __m128i __A, __m128i __B, __m128i __C) +{ + __builtin_ia32_loadiwkey ((__v2di) __B, (__v2di) __C, (__v2di) __A, __I); +} + +extern __inline +unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_encodekey128_u32 (unsigned int __I, __m128i __A, void * __P) +{ + return __builtin_ia32_encodekey128_u32 (__I, (__v2di)__A, __P); +} + +extern __inline +unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_encodekey256_u32 (unsigned int __I, __m128i __A, __m128i __B, void * __P) +{ + return __builtin_ia32_encodekey256_u32 (__I, (__v2di)__A, (__v2di)__B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdec128kl_u8 (__m128i * __A, __m128i __B, const void * __P) +{ + return __builtin_ia32_aesdec128kl_u8 ((__v2di *) __A, (__v2di) __B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdec256kl_u8 (__m128i * __A, __m128i __B, const void * __P) +{ + return __builtin_ia32_aesdec256kl_u8 ((__v2di *) __A, (__v2di) __B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesenc128kl_u8 (__m128i * __A, __m128i __B, const void * __P) +{ + return __builtin_ia32_aesenc128kl_u8 ((__v2di *) __A, (__v2di) __B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesenc256kl_u8 (__m128i * __A, __m128i __B, const void * __P) +{ + return __builtin_ia32_aesenc256kl_u8 ((__v2di *) __A, (__v2di) __B, __P); +} + +#ifdef __DISABLE_KL__ +#undef __DISABLE_KL__ 
+#pragma GCC pop_options +#endif /* __DISABLE_KL__ */ + +#ifndef __WIDEKL__ +#pragma GCC push_options +#pragma GCC target("widekl") +#define __DISABLE_WIDEKL__ +#endif /* __WIDEKL__ */ + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdecwide128kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P) +{ + return __builtin_ia32_aesdecwide128kl_u8 ((__v2di *) __A, (__v2di *) __B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdecwide256kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P) +{ + return __builtin_ia32_aesdecwide256kl_u8 ((__v2di *) __A, (__v2di *) __B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesencwide128kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P) +{ + return __builtin_ia32_aesencwide128kl_u8 ((__v2di *) __A, (__v2di *) __B, __P); +} + +extern __inline +unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesencwide256kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P) +{ + return __builtin_ia32_aesencwide256kl_u8 ((__v2di *) __A, (__v2di *) __B, __P); +} +#ifdef __DISABLE_WIDEKL__ +#undef __DISABLE_WIDEKL__ +#pragma GCC pop_options +#endif /* __DISABLE_WIDEKL__ */ +#endif /* _KEYLOCKERINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/lwpintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/lwpintrin.h new file mode 100644 index 0000000..817a835 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/lwpintrin.h @@ -0,0 +1,107 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _LWPINTRIN_H_INCLUDED +#define _LWPINTRIN_H_INCLUDED + +#ifndef __LWP__ +#pragma GCC push_options +#pragma GCC target("lwp") +#define __DISABLE_LWP__ +#endif /* __LWP__ */ + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__llwpcb (void *__pcbAddress) +{ + __builtin_ia32_llwpcb (__pcbAddress); +} + +extern __inline void * __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__slwpcb (void) +{ + return __builtin_ia32_slwpcb (); +} + +#ifdef __OPTIMIZE__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpval32 (unsigned int __data2, unsigned int __data1, unsigned int __flags) +{ + __builtin_ia32_lwpval32 (__data2, __data1, __flags); +} + +#ifdef __x86_64__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpval64 (unsigned long long __data2, unsigned int __data1, + unsigned int __flags) +{ + __builtin_ia32_lwpval64 (__data2, __data1, __flags); +} +#endif +#else +#define __lwpval32(D2, D1, F) \ + (__builtin_ia32_lwpval32 ((unsigned int) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#ifdef __x86_64__ +#define __lwpval64(D2, D1, F) \ + (__builtin_ia32_lwpval64 ((unsigned long long) (D2), 
(unsigned int) (D1), \ + (unsigned int) (F))) +#endif +#endif + + +#ifdef __OPTIMIZE__ +extern __inline unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpins32 (unsigned int __data2, unsigned int __data1, unsigned int __flags) +{ + return __builtin_ia32_lwpins32 (__data2, __data1, __flags); +} + +#ifdef __x86_64__ +extern __inline unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lwpins64 (unsigned long long __data2, unsigned int __data1, + unsigned int __flags) +{ + return __builtin_ia32_lwpins64 (__data2, __data1, __flags); +} +#endif +#else +#define __lwpins32(D2, D1, F) \ + (__builtin_ia32_lwpins32 ((unsigned int) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#ifdef __x86_64__ +#define __lwpins64(D2, D1, F) \ + (__builtin_ia32_lwpins64 ((unsigned long long) (D2), (unsigned int) (D1), \ + (unsigned int) (F))) +#endif +#endif + +#ifdef __DISABLE_LWP__ +#undef __DISABLE_LWP__ +#pragma GCC pop_options +#endif /* __DISABLE_LWP__ */ + +#endif /* _LWPINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/lzcntintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/lzcntintrin.h new file mode 100644 index 0000000..84a4797 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/lzcntintrin.h @@ -0,0 +1,75 @@ +/* Copyright (C) 2009-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + + +#ifndef _LZCNTINTRIN_H_INCLUDED +#define _LZCNTINTRIN_H_INCLUDED + +#ifndef __LZCNT__ +#pragma GCC push_options +#pragma GCC target("lzcnt") +#define __DISABLE_LZCNT__ +#endif /* __LZCNT__ */ + +extern __inline unsigned short __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt16 (unsigned short __X) +{ + return __builtin_ia32_lzcnt_u16 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt32 (unsigned int __X) +{ + return __builtin_ia32_lzcnt_u32 (__X); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_lzcnt_u32 (unsigned int __X) +{ + return __builtin_ia32_lzcnt_u32 (__X); +} + +#ifdef __x86_64__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__lzcnt64 (unsigned long long __X) +{ + return __builtin_ia32_lzcnt_u64 (__X); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_lzcnt_u64 (unsigned long long __X) +{ + return __builtin_ia32_lzcnt_u64 (__X); +} +#endif + +#ifdef __DISABLE_LZCNT__ +#undef __DISABLE_LZCNT__ +#pragma GCC pop_options +#endif /* __DISABLE_LZCNT__ */ + +#endif /* _LZCNTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/mm3dnow.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/mm3dnow.h new file mode 100644 index 0000000..d61ff2c --- /dev/null +++ 
b/lib/gcc/x86_64-linux-musl/12.2.0/include/mm3dnow.h @@ -0,0 +1,233 @@ +/* Copyright (C) 2004-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the mm3dnow.h (of supposedly AMD origin) included with + MSVC 7.1. 
*/ + +#ifndef _MM3DNOW_H_INCLUDED +#define _MM3DNOW_H_INCLUDED + +#include +#include + +#if defined __x86_64__ && !defined __SSE__ || !defined __3dNOW__ +#pragma GCC push_options +#ifdef __x86_64__ +#pragma GCC target("sse,3dnow") +#else +#pragma GCC target("3dnow") +#endif +#define __DISABLE_3dNOW__ +#endif /* __3dNOW__ */ + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_femms (void) +{ + __builtin_ia32_femms(); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgusb (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pavgusb ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pf2id (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2id ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfacc ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfadd (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfcmpeq (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpeq ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfcmpge (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpge ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfcmpgt (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfcmpgt ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfmax (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmax 
((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfmin (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmin ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfmul (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfmul ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrcp (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrcp ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrcpit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit1 ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrcpit2 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrcpit2 ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrsqrt (__m64 __A) +{ + return (__m64)__builtin_ia32_pfrsqrt ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfrsqit1 (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfrsqit1 ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfsub (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsub ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfsubr (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfsubr ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pi2fd (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fd ((__v2si)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_m_pmulhrw (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pmulhrw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_prefetch (void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_from_float (float __A) +{ + return __extension__ (__m64)(__v2sf){ __A, 0.0f }; +} + +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_to_float (__m64 __A) +{ + union { __v2sf v; float a[2]; } __tmp; + __tmp.v = (__v2sf)__A; + return __tmp.a[0]; +} + +#ifdef __DISABLE_3dNOW__ +#undef __DISABLE_3dNOW__ +#pragma GCC pop_options +#endif /* __DISABLE_3dNOW__ */ + +#if defined __x86_64__ && !defined __SSE__ || !defined __3dNOW_A__ +#pragma GCC push_options +#ifdef __x86_64__ +#pragma GCC target("sse,3dnowa") +#else +#pragma GCC target("3dnowa") +#endif +#define __DISABLE_3dNOW_A__ +#endif /* __3dNOW_A__ */ + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pf2iw (__m64 __A) +{ + return (__m64)__builtin_ia32_pf2iw ((__v2sf)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfnacc ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pfpnacc (__m64 __A, __m64 __B) +{ + return (__m64)__builtin_ia32_pfpnacc ((__v2sf)__A, (__v2sf)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pi2fw (__m64 __A) +{ + return (__m64)__builtin_ia32_pi2fw ((__v2si)__A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pswapd (__m64 __A) +{ + return (__m64)__builtin_ia32_pswapdsf ((__v2sf)__A); +} + +#ifdef 
__DISABLE_3dNOW_A__ +#undef __DISABLE_3dNOW_A__ +#pragma GCC pop_options +#endif /* __DISABLE_3dNOW_A__ */ + +#endif /* _MM3DNOW_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/mm_malloc.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/mm_malloc.h new file mode 100644 index 0000000..3b2e4f4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/mm_malloc.h @@ -0,0 +1,57 @@ +/* Copyright (C) 2004-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _MM_MALLOC_H_INCLUDED +#define _MM_MALLOC_H_INCLUDED + +#include + +/* We can't depend on since the prototype of posix_memalign + may not be visible. 
*/ +#ifndef __cplusplus +extern int posix_memalign (void **, size_t, size_t); +#else +extern "C" int posix_memalign (void **, size_t, size_t) throw (); +#endif + +static __inline void * +_mm_malloc (size_t __size, size_t __alignment) +{ + void *__ptr; + if (__alignment == 1) + return malloc (__size); + if (__alignment == 2 || (sizeof (void *) == 8 && __alignment == 4)) + __alignment = sizeof (void *); + if (posix_memalign (&__ptr, __alignment, __size) == 0) + return __ptr; + else + return NULL; +} + +static __inline void +_mm_free (void *__ptr) +{ + free (__ptr); +} + +#endif /* _MM_MALLOC_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/mmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/mmintrin.h new file mode 100644 index 0000000..83263e4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/mmintrin.h @@ -0,0 +1,965 @@ +/* Copyright (C) 2002-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. 
*/ + +#ifndef _MMINTRIN_H_INCLUDED +#define _MMINTRIN_H_INCLUDED + +#if defined __x86_64__ && !defined __SSE__ || !defined __MMX__ +#pragma GCC push_options +#ifdef __MMX_WITH_SSE__ +#pragma GCC target("sse2") +#elif defined __x86_64__ +#pragma GCC target("sse,mmx") +#else +#pragma GCC target("mmx") +#endif +#define __DISABLE_MMX__ +#endif /* __MMX__ */ + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef int __m64 __attribute__ ((__vector_size__ (8), __may_alias__)); +typedef int __m32 __attribute__ ((__vector_size__ (4), __may_alias__)); +typedef short __m16 __attribute__ ((__vector_size__ (2), __may_alias__)); + +/* Unaligned version of the same type */ +typedef int __m64_u __attribute__ ((__vector_size__ (8), __may_alias__, __aligned__ (1))); +typedef int __m32_u __attribute__ ((__vector_size__ (4), \ + __may_alias__, __aligned__ (1))); +typedef short __m16_u __attribute__ ((__vector_size__ (2), \ + __may_alias__, __aligned__ (1))); + +/* Internal data types for implementing the intrinsics. */ +typedef int __v2si __attribute__ ((__vector_size__ (8))); +typedef short __v4hi __attribute__ ((__vector_size__ (8))); +typedef char __v8qi __attribute__ ((__vector_size__ (8))); +typedef long long __v1di __attribute__ ((__vector_size__ (8))); +typedef float __v2sf __attribute__ ((__vector_size__ (8))); + +/* Empty the multimedia state. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_empty (void) +{ + __builtin_ia32_emms (); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_empty (void) +{ + _mm_empty (); +} + +/* Convert I to a __m64 object. The integer is zero-extended to 64-bits. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_si64 (int __i) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i, 0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_from_int (int __i) +{ + return _mm_cvtsi32_si64 (__i); +} + +#ifdef __x86_64__ +/* Convert I to a __m64 object. */ + +/* Intel intrinsic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_from_int64 (long long __i) +{ + return (__m64) __i; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_m64 (long long __i) +{ + return (__m64) __i; +} + +/* Microsoft intrinsic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_si64 (long long __i) +{ + return (__m64) __i; +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi64x (long long __i) +{ + return (__m64) __i; +} +#endif + +/* Convert the lower 32 bits of the __m64 object into an integer. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si32 (__m64 __i) +{ + return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_to_int (__m64 __i) +{ + return _mm_cvtsi64_si32 (__i); +} + +#ifdef __x86_64__ +/* Convert the __m64 object to a 64bit integer. */ + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_to_int64 (__m64 __i) +{ + return (long long)__i; +} + +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtm64_si64 (__m64 __i) +{ + return (long long)__i; +} + +/* Microsoft intrinsic. 
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_si64x (__m64 __i) +{ + return (long long)__i; +} +#endif + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with signed saturation. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_packsswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi16 (__m1, __m2); +} + +/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with signed saturation. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_packssdw (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pi32 (__m1, __m2); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with unsigned saturation. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_packuswb (__m64 __m1, __m64 __m2) +{ + return _mm_packs_pu16 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the high half of M1 with the four + 8-bit values from the high half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckhbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the high half of M1 with the two + 16-bit values from the high half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckhwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the high half of M1 with the 32-bit + value from the high half of M2. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckhdq (__m64 __m1, __m64 __m2) +{ + return _mm_unpackhi_pi32 (__m1, __m2); +} + +/* Interleave the four 8-bit values from the low half of M1 with the four + 8-bit values from the low half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpcklbw (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi8 (__m1, __m2); +} + +/* Interleave the two 16-bit values from the low half of M1 with the two + 16-bit values from the low half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpcklwd (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi16 (__m1, __m2); +} + +/* Interleave the 32-bit value from the low half of M1 with the 32-bit + value from the low half of M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_punpckldq (__m64 __m1, __m64 __m2) +{ + return _mm_unpacklo_pi32 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddb (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddw (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi16 (__m1, __m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddd (__m64 __m1, __m64 __m2) +{ + return _mm_add_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifndef __SSE2__ +#pragma GCC push_options +#ifdef __MMX_WITH_SSE__ +#pragma GCC target("sse2") +#else +#pragma GCC target("sse2,mmx") +#endif +#define __DISABLE_SSE2__ +#endif /* __SSE2__ */ + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_si64 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddq ((__v1di)__m1, (__v1di)__m2); +} +#ifdef __DISABLE_SSE2__ +#undef __DISABLE_SSE2__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE2__ */ + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed + saturated arithmetic. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddsb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed + saturated arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddsw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pi16 (__m1, __m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned + saturated arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddusb (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu8 (__m1, __m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned + saturated arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_adds_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_paddusw (__m64 __m1, __m64 __m2) +{ + return _mm_adds_pu16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubb (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubw (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi16 (__m1, __m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubd (__m64 __m1, __m64 __m2) +{ + return _mm_sub_pi32 (__m1, __m2); +} + +/* Add the 64-bit values in M1 to the 64-bit values in M2. */ +#ifndef __SSE2__ +#pragma GCC push_options +#ifdef __MMX_WITH_SSE__ +#pragma GCC target("sse2") +#else +#pragma GCC target("sse2,mmx") +#endif +#define __DISABLE_SSE2__ +#endif /* __SSE2__ */ + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_si64 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubq ((__v1di)__m1, (__v1di)__m2); +} +#ifdef __DISABLE_SSE2__ +#undef __DISABLE_SSE2__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE2__ */ + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed + saturating arithmetic. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubsb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + signed saturating arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubsw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pi16 (__m1, __m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using + unsigned saturating arithmetic. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubusb (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu8 (__m1, __m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + unsigned saturating arithmetic. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_subs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psubusw (__m64 __m1, __m64 __m2) +{ + return _mm_subs_pu16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_madd_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaddwd (__m64 __m1, __m64 __m2) +{ + return _mm_madd_pi16 (__m1, __m2); +} + +/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmulhw (__m64 __m1, __m64 __m2) +{ + return _mm_mulhi_pi16 (__m1, __m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce + the low 16 bits of the results. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmullw (__m64 __m1, __m64 __m2) +{ + return _mm_mullo_pi16 (__m1, __m2); +} + +/* Shift four 16-bit values in M left by COUNT. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (__v4hi)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllw (__m64 __m, __m64 __count) +{ + return _mm_sll_pi16 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllwi (__m64 __m, int __count) +{ + return _mm_slli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M left by COUNT. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_pslld ((__v2si)__m, (__v2si)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pslld (__m64 __m, __m64 __count) +{ + return _mm_sll_pi32 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pslldi (__m64 __m, int __count) +{ + return _mm_slli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sll_si64 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psllq ((__v1di)__m, (__v1di)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllq (__m64 __m, __m64 __count) +{ + return _mm_sll_si64 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_slli_si64 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psllqi ((__v1di)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psllqi (__m64 __m, int __count) +{ + return _mm_slli_si64 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (__v4hi)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psraw (__m64 __m, __m64 __count) +{ + return _mm_sra_pi16 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrawi (__m64 __m, int __count) +{ + return _mm_srai_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sra_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrad ((__v2si)__m, (__v2si)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrad (__m64 __m, __m64 __count) +{ + return _mm_sra_pi32 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srai_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psradi (__m64 __m, int __count) +{ + return _mm_srai_pi32 (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in zeros. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (__v4hi)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlw (__m64 __m, __m64 __count) +{ + return _mm_srl_pi16 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlwi (__m64 __m, int __count) +{ + return _mm_srli_pi16 (__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in zeros. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrld ((__v2si)__m, (__v2si)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrld (__m64 __m, __m64 __count) +{ + return _mm_srl_pi32 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrldi (__m64 __m, int __count) +{ + return _mm_srli_pi32 (__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT; shift in zeros. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srl_si64 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_ia32_psrlq ((__v1di)__m, (__v1di)__count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlq (__m64 __m, __m64 __count) +{ + return _mm_srl_si64 (__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_srli_si64 (__m64 __m, int __count) +{ + return (__m64) __builtin_ia32_psrlqi ((__v1di)__m, __count); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psrlqi (__m64 __m, int __count) +{ + return _mm_srli_si64 (__m, __count); +} + +/* Bit-wise AND the 64-bit values in M1 and M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pand (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pand (__m64 __m1, __m64 __m2) +{ + return _mm_and_si64 (__m1, __m2); +} + +/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the + 64-bit value in M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pandn (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pandn (__m64 __m1, __m64 __m2) +{ + return _mm_andnot_si64 (__m1, __m2); +} + +/* Bit-wise inclusive OR the 64-bit values in M1 and M2. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_por (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_por (__m64 __m1, __m64 __m2) +{ + return _mm_or_si64 (__m1, __m2); +} + +/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_si64 (__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pxor (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pxor (__m64 __m1, __m64 __m2) +{ + return _mm_xor_si64 (__m1, __m2); +} + +/* Compare eight 8-bit values. The result of the comparison is 0xFF if the + test is true and zero if false. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpeqb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi8 (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpgtb (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi8 (__m1, __m2); +} + +/* Compare four 16-bit values. The result of the comparison is 0xFFFF if + the test is true and zero if false. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpeqw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi16 (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpgtw (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi16 (__m1, __m2); +} + +/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if + the test is true and zero if false. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpeqd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpeq_pi32 (__m1, __m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pcmpgtd (__m64 __m1, __m64 __m2) +{ + return _mm_cmpgt_pi32 (__m1, __m2); +} + +/* Creates a 64-bit zero. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_si64 (void) +{ + return (__m64)0LL; +} + +/* Creates a vector of two 32-bit values; I0 is least significant. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi32 (int __i1, int __i0) +{ + return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1); +} + +/* Creates a vector of four 16-bit values; W0 is least significant. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0) +{ + return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3); +} + +/* Creates a vector of eight 8-bit values; B0 is least significant. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4, + char __b3, char __b2, char __b1, char __b0) +{ + return (__m64) __builtin_ia32_vec_init_v8qi (__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); +} + +/* Similar, but with the arguments in reverse order. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pi32 (int __i0, int __i1) +{ + return _mm_set_pi32 (__i1, __i0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3) +{ + return _mm_set_pi16 (__w3, __w2, __w1, __w0); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3, + char __b4, char __b5, char __b6, char __b7) +{ + return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/* Creates a vector of two 32-bit values, both elements containing I. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pi32 (int __i) +{ + return _mm_set_pi32 (__i, __i); +} + +/* Creates a vector of four 16-bit values, all elements containing W. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pi16 (short __w) +{ + return _mm_set_pi16 (__w, __w, __w, __w); +} + +/* Creates a vector of eight 8-bit values, all elements containing B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_pi8 (char __b) +{ + return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b); +} +#ifdef __DISABLE_MMX__ +#undef __DISABLE_MMX__ +#pragma GCC pop_options +#endif /* __DISABLE_MMX__ */ + +#endif /* _MMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/movdirintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/movdirintrin.h new file mode 100644 index 0000000..55cec81 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/movdirintrin.h @@ -0,0 +1,74 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _MOVDIRINTRIN_H_INCLUDED +#define _MOVDIRINTRIN_H_INCLUDED + +#ifndef __MOVDIRI__ +#pragma GCC push_options +#pragma GCC target ("movdiri") +#define __DISABLE_MOVDIRI__ +#endif /* __MOVDIRI__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_directstoreu_u32 (void * __P, unsigned int __A) +{ + __builtin_ia32_directstoreu_u32 ((unsigned int *)__P, __A); +} +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_directstoreu_u64 (void * __P, unsigned long long __A) +{ + __builtin_ia32_directstoreu_u64 ((unsigned long long *)__P, __A); +} +#endif + +#ifdef __DISABLE_MOVDIRI__ +#undef __DISABLE_MOVDIRI__ +#pragma GCC pop_options +#endif /* __DISABLE_MOVDIRI__ */ + +#ifndef __MOVDIR64B__ +#pragma GCC push_options +#pragma GCC target ("movdir64b") +#define __DISABLE_MOVDIR64B__ +#endif /* __MOVDIR64B__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_movdir64b (void * __P, const void * __Q) +{ + __builtin_ia32_movdir64b (__P, __Q); +} + +#ifdef __DISABLE_MOVDIR64B__ +#undef __DISABLE_MOVDIR64B__ +#pragma GCC pop_options +#endif /* __DISABLE_MOVDIR64B__ */ +#endif /* _MOVDIRINTRIN_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/mwaitintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/mwaitintrin.h new file mode 100644 index 0000000..40a03b0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/mwaitintrin.h @@ -0,0 +1,52 @@ +/* Copyright (C) 2021-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _MWAITINTRIN_H_INCLUDED +#define _MWAITINTRIN_H_INCLUDED + +#ifndef __MWAIT__ +#pragma GCC push_options +#pragma GCC target("mwait") +#define __DISABLE_MWAIT__ +#endif /* __MWAIT__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_monitor (void const * __P, unsigned int __E, unsigned int __H) +{ + __builtin_ia32_monitor (__P, __E, __H); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mwait (unsigned int __E, unsigned int __H) +{ + __builtin_ia32_mwait (__E, __H); +} + +#ifdef __DISABLE_MWAIT__ +#undef __DISABLE_MWAIT__ +#pragma GCC pop_options +#endif /* __DISABLE_MWAIT__ */ + +#endif /* _MWAITINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/mwaitxintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/mwaitxintrin.h new file mode 100644 index 0000000..2c24cf7 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/mwaitxintrin.h @@ -0,0 +1,50 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _MWAITXINTRIN_H_INCLUDED +#define _MWAITXINTRIN_H_INCLUDED + +#ifndef __MWAITX__ +#pragma GCC push_options +#pragma GCC target("mwaitx") +#define __DISABLE_MWAITX__ +#endif /* __MWAITX__ */ + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_monitorx (void const * __P, unsigned int __E, unsigned int __H) +{ + __builtin_ia32_monitorx (__P, __E, __H); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mwaitx (unsigned int __E, unsigned int __H, unsigned int __C) +{ + __builtin_ia32_mwaitx (__E, __H, __C); +} + +#ifdef __DISABLE_MWAITX__ +#undef __DISABLE_MWAITX__ +#pragma GCC pop_options +#endif /* __DISABLE_MWAITX__ */ + +#endif /* _MWAITXINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/nmmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/nmmintrin.h new file mode 100644 index 0000000..d71933d --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/nmmintrin.h @@ -0,0 +1,33 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _NMMINTRIN_H_INCLUDED +#define _NMMINTRIN_H_INCLUDED + +/* We just include SSE4.1 header file. */ +#include + +#endif /* _NMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/pconfigintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/pconfigintrin.h new file mode 100644 index 0000000..2d4696d --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/pconfigintrin.h @@ -0,0 +1,78 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _PCONFIGINTRIN_H_INCLUDED +#define _PCONFIGINTRIN_H_INCLUDED + +#ifndef __PCONFIG__ +#pragma GCC push_options +#pragma GCC target("pconfig") +#define __DISABLE_PCONFIG__ +#endif /* __PCONFIG__ */ + +#define __pconfig_b(leaf, b, retval) \ + __asm__ __volatile__ ("pconfig\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "b" (b) \ + : "cc") + +#define __pconfig_generic(leaf, b, c, d, retval) \ + __asm__ __volatile__ ("pconfig\n\t" \ + : "=a" (retval), "=b" (b), "=c" (c), "=d" (d) \ + : "a" (leaf), "b" (b), "c" (c), "d" (d) \ + : "cc") + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_pconfig_u32 (const unsigned int __L, size_t __D[]) +{ + enum __pconfig_type + { + __PCONFIG_KEY_PROGRAM = 0x01, + }; + + unsigned int __R = 0; + + if (!__builtin_constant_p (__L)) + __pconfig_generic (__L, __D[0], __D[1], __D[2], __R); + else switch (__L) + { + case __PCONFIG_KEY_PROGRAM: + __pconfig_b (__L, __D[0], __R); + break; + default: + __pconfig_generic (__L, __D[0], __D[1], __D[2], __R); + } + return __R; +} + +#ifdef __DISABLE_PCONFIG__ +#undef __DISABLE_PCONFIG__ +#pragma GCC pop_options +#endif /* __DISABLE_PCONFIG__ */ + +#endif /* _PCONFIGINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/pkuintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/pkuintrin.h new file mode 100644 index 0000000..ac513e7 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/pkuintrin.h @@ -0,0 +1,56 @@ +/* Copyright (C) 2015-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _PKUINTRIN_H_INCLUDED +#define _PKUINTRIN_H_INCLUDED + +#ifndef __PKU__ +#pragma GCC push_options +#pragma GCC target("pku") +#define __DISABLE_PKU__ +#endif /* __PKU__ */ + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdpkru_u32 (void) +{ + return __builtin_ia32_rdpkru (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wrpkru (unsigned int __key) +{ + __builtin_ia32_wrpkru (__key); +} + +#ifdef __DISABLE_PKU__ +#undef __DISABLE_PKU__ +#pragma GCC pop_options +#endif /* __DISABLE_PKU__ */ + +#endif /* _PKUINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/pmmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/pmmintrin.h new file mode 100644 index 0000000..396b83b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/pmmintrin.h @@ -0,0 +1,121 @@ +/* Copyright (C) 2003-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _PMMINTRIN_H_INCLUDED +#define _PMMINTRIN_H_INCLUDED + +/* We need definitions from the SSE2 and SSE header files*/ +#include +#include + +#ifndef __SSE3__ +#pragma GCC push_options +#pragma GCC target("sse3") +#define __DISABLE_SSE3__ +#endif /* __SSE3__ */ + +/* Additional bits in the MXCSR. 
*/ +#define _MM_DENORMALS_ZERO_MASK 0x0040 +#define _MM_DENORMALS_ZERO_ON 0x0040 +#define _MM_DENORMALS_ZERO_OFF 0x0000 + +#define _MM_SET_DENORMALS_ZERO_MODE(mode) \ + _mm_setcsr ((_mm_getcsr () & ~_MM_DENORMALS_ZERO_MASK) | (mode)) +#define _MM_GET_DENORMALS_ZERO_MODE() \ + (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_addsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_addsubps ((__v4sf)__X, (__v4sf)__Y); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_haddps ((__v4sf)__X, (__v4sf)__Y); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_ps (__m128 __X, __m128 __Y) +{ + return (__m128) __builtin_ia32_hsubps ((__v4sf)__X, (__v4sf)__Y); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movehdup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movshdup ((__v4sf)__X); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_moveldup_ps (__m128 __X) +{ + return (__m128) __builtin_ia32_movsldup ((__v4sf)__X); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_addsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_haddpd ((__v2df)__X, (__v2df)__Y); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pd (__m128d __X, __m128d __Y) +{ + return (__m128d) __builtin_ia32_hsubpd ((__v2df)__X, (__v2df)__Y); +} + +extern __inline __m128d __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_loaddup_pd (double const *__P) +{ + return _mm_load1_pd (__P); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movedup_pd (__m128d __X) +{ + return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0)); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_lddqu_si128 (__m128i const *__P) +{ + return (__m128i) __builtin_ia32_lddqu ((char const *)__P); +} + +#ifdef __DISABLE_SSE3__ +#undef __DISABLE_SSE3__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE3__ */ + +#endif /* _PMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/popcntintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/popcntintrin.h new file mode 100644 index 0000000..db9d925 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/popcntintrin.h @@ -0,0 +1,53 @@ +/* Copyright (C) 2009-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _POPCNTINTRIN_H_INCLUDED +#define _POPCNTINTRIN_H_INCLUDED + +#ifndef __POPCNT__ +#pragma GCC push_options +#pragma GCC target("popcnt") +#define __DISABLE_POPCNT__ +#endif /* __POPCNT__ */ + +/* Calculate a number of bits set to 1. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_u32 (unsigned int __X) +{ + return __builtin_popcount (__X); +} + +#ifdef __x86_64__ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_popcnt_u64 (unsigned long long __X) +{ + return __builtin_popcountll (__X); +} +#endif + +#ifdef __DISABLE_POPCNT__ +#undef __DISABLE_POPCNT__ +#pragma GCC pop_options +#endif /* __DISABLE_POPCNT__ */ + +#endif /* _POPCNTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/prfchwintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/prfchwintrin.h new file mode 100644 index 0000000..055279b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/prfchwintrin.h @@ -0,0 +1,37 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#if !defined _IMMINTRIN_H_INCLUDED && !defined _MM3DNOW_H_INCLUDED +# error "Never use directly; include or instead." +#endif + +#ifndef _PRFCHWINTRIN_H_INCLUDED +#define _PRFCHWINTRIN_H_INCLUDED + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_prefetchw (void *__P) +{ + __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */); +} + +#endif /* _PRFCHWINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/rdseedintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/rdseedintrin.h new file mode 100644 index 0000000..fd009ff --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/rdseedintrin.h @@ -0,0 +1,66 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _RDSEEDINTRIN_H_INCLUDED +#define _RDSEEDINTRIN_H_INCLUDED + +#ifndef __RDSEED__ +#pragma GCC push_options +#pragma GCC target("rdseed") +#define __DISABLE_RDSEED__ +#endif /* __RDSEED__ */ + + +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdseed16_step (unsigned short *__p) +{ + return __builtin_ia32_rdseed_hi_step (__p); +} + +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdseed32_step (unsigned int *__p) +{ + return __builtin_ia32_rdseed_si_step (__p); +} + +#ifdef __x86_64__ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdseed64_step (unsigned long long *__p) +{ + return __builtin_ia32_rdseed_di_step (__p); +} +#endif + +#ifdef __DISABLE_RDSEED__ +#undef __DISABLE_RDSEED__ +#pragma GCC pop_options +#endif /* __DISABLE_RDSEED__ */ + +#endif /* _RDSEEDINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/rtmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/rtmintrin.h new file mode 100644 index 0000000..3cb4ce6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/rtmintrin.h @@ -0,0 +1,84 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _RTMINTRIN_H_INCLUDED +#define _RTMINTRIN_H_INCLUDED + +#ifndef __RTM__ +#pragma GCC push_options +#pragma GCC target("rtm") +#define __DISABLE_RTM__ +#endif /* __RTM__ */ + +#define _XBEGIN_STARTED (~0u) +#define _XABORT_EXPLICIT (1 << 0) +#define _XABORT_RETRY (1 << 1) +#define _XABORT_CONFLICT (1 << 2) +#define _XABORT_CAPACITY (1 << 3) +#define _XABORT_DEBUG (1 << 4) +#define _XABORT_NESTED (1 << 5) +#define _XABORT_CODE(x) (((x) >> 24) & 0xFF) + +/* Start an RTM code region. Return _XBEGIN_STARTED on success and the + abort condition otherwise. */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xbegin (void) +{ + return __builtin_ia32_xbegin (); +} + +/* Specify the end of an RTM code region. If it corresponds to the + outermost transaction, then attempts the transaction commit. If the + commit fails, then control is transferred to the outermost transaction + fallback handler. */ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xend (void) +{ + __builtin_ia32_xend (); +} + +/* Force an RTM abort condition. The control is transferred to the + outermost transaction fallback handler with the abort condition IMM. 
*/ +#ifdef __OPTIMIZE__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xabort (const unsigned int __imm) +{ + __builtin_ia32_xabort (__imm); +} +#else +#define _xabort(N) __builtin_ia32_xabort (N) +#endif /* __OPTIMIZE__ */ + +#ifdef __DISABLE_RTM__ +#undef __DISABLE_RTM__ +#pragma GCC pop_options +#endif /* __DISABLE_RTM__ */ + +#endif /* _RTMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/serializeintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/serializeintrin.h new file mode 100644 index 0000000..6f58a30 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/serializeintrin.h @@ -0,0 +1,49 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _SERIALIZE_H_INCLUDED +#define _SERIALIZE_H_INCLUDED + +#ifndef __SERIALIZE__ +#pragma GCC push_options +#pragma GCC target("serialize") +#define __DISABLE_SERIALIZE__ +#endif /* __SERIALIZE__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_serialize (void) +{ + __builtin_ia32_serialize (); +} + +#ifdef __DISABLE_SERIALIZE__ +#undef __DISABLE_SERIALIZE__ +#pragma GCC pop_options +#endif /* __DISABLE_SERIALIZE__ */ + +#endif /* _SERIALIZE_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/sgxintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/sgxintrin.h new file mode 100644 index 0000000..4b1fe24 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/sgxintrin.h @@ -0,0 +1,253 @@ +/* Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _SGXINTRIN_H_INCLUDED +#define _SGXINTRIN_H_INCLUDED + +#ifndef __SGX__ +#pragma GCC push_options +#pragma GCC target("sgx") +#define __DISABLE_SGX__ +#endif /* __SGX__ */ + +#define __encls_bc(leaf, b, c, retval) \ + __asm__ __volatile__ ("encls\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "b" (b), "c" (c) \ + : "cc") + +#define __encls_bcd(leaf, b, c, d, retval) \ + __asm__ __volatile__("encls\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "b" (b), "c" (c), "d" (d) \ + : "cc") + +#define __encls_c(leaf, c, retval) \ + __asm__ __volatile__("encls\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "c" (c) \ + : "cc") + +#define __encls_edbgrd(leaf, b, c, retval) \ + __asm__ __volatile__("encls\n\t" \ + : "=a" (retval), "=b" (b) \ + : "a" (leaf), "c" (c)) + +#define __encls_generic(leaf, b, c, d, retval) \ + __asm__ __volatile__("encls\n\t" \ + : "=a" (retval), "=b" (b), "=c" (c), "=d" (d)\ + : "a" (leaf), "b" (b), "c" (c), "d" (d) \ + : "cc") + +#define __enclu_bc(leaf, b, c, retval) \ + __asm__ __volatile__("enclu\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "b" (b), "c" (c) \ + : "cc") + +#define __enclu_bcd(leaf, b, c, d, retval) \ + __asm__ __volatile__("enclu\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "b" (b), "c" (c), "d" (d) \ + : "cc") + +#define __enclu_eenter(leaf, b, c, retval) \ + __asm__ __volatile__("enclu\n\t" \ + : "=a" (retval), "=c" (c) \ + : "a" (leaf), "b" (b), "c" (c) \ + : "cc") + +#define __enclu_eexit(leaf, b, c, retval) \ + __asm__ __volatile__("enclu\n\t" \ + : "=a" (retval), "=c" (c) \ + : "a" (leaf), "b" (b) \ + : "cc") + +#define __enclu_generic(leaf, b, c, d, retval) \ + __asm__ __volatile__("enclu\n\t" \ + : "=a" (retval), "=b" (b), "=c" (c), "=d" (d)\ + : "a" (leaf), "b" (b), "c" (c), "d" (d) \ + : "cc") + +#define __enclv_bc(leaf, b, c, retval) \ + __asm__ __volatile__("enclv\n\t" \ + : "=a" (retval) \ + : "a" (leaf), "b" (b), "c" (c) \ + : "cc") + +#define __enclv_cd(leaf, c, d, retval) \ + __asm__ __volatile__("enclv\n\t" \ + : "=a" 
(retval) \ + : "a" (leaf), "c" (c), "d" (d) \ + : "cc") + +#define __enclv_generic(leaf, b, c, d, retval) \ + __asm__ __volatile__("enclv\n\t" \ + : "=a" (retval), "=b" (b), "=c" (b), "=d" (d)\ + : "a" (leaf), "b" (b), "c" (c), "d" (d) \ + : "cc") + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_encls_u32 (const unsigned int __L, size_t __D[]) +{ + enum __encls_type + { + __SGX_ECREATE = 0x00, + __SGX_EADD = 0x01, + __SGX_EINIT = 0x02, + __SGX_EREMOVE = 0x03, + __SGX_EDBGRD = 0x04, + __SGX_EDBGWR = 0x05, + __SGX_EEXTEND = 0x06, + __SGX_ELDB = 0x07, + __SGX_ELDU = 0x08, + __SGX_EBLOCK = 0x09, + __SGX_EPA = 0x0A, + __SGX_EWB = 0x0B, + __SGX_ETRACK = 0x0C, + __SGX_EAUG = 0x0D, + __SGX_EMODPR = 0x0E, + __SGX_EMODT = 0x0F, + __SGX_ERDINFO = 0x10, + __SGX_ETRACKC = 0x11, + __SGX_ELDBC = 0x12, + __SGX_ELDUC = 0x13 + }; + enum __encls_type __T = (enum __encls_type)__L; + unsigned int __R = 0; + if (!__builtin_constant_p (__T)) + __encls_generic (__L, __D[0], __D[1], __D[2], __R); + else switch (__T) + { + case __SGX_ECREATE: + case __SGX_EADD: + case __SGX_EDBGWR: + case __SGX_EEXTEND: + case __SGX_EPA: + case __SGX_EMODPR: + case __SGX_EMODT: + case __SGX_EAUG: + case __SGX_ERDINFO: + __encls_bc (__L, __D[0], __D[1], __R); + break; + case __SGX_EINIT: + case __SGX_ELDB: + case __SGX_ELDU: + case __SGX_EWB: + case __SGX_ELDBC: + case __SGX_ELDUC: + __encls_bcd (__L, __D[0], __D[1], __D[2], __R); + break; + case __SGX_EREMOVE: + case __SGX_EBLOCK: + case __SGX_ETRACK: + case __SGX_ETRACKC: + __encls_c (__L, __D[1], __R); + break; + case __SGX_EDBGRD: + __encls_edbgrd (__L, __D[0], __D[1], __R); + break; + default: + __encls_generic (__L, __D[0], __D[1], __D[2], __R); + } + return __R; +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_enclu_u32 (const unsigned int __L, size_t __D[]) +{ + enum __enclu_type + { + __SGX_EREPORT = 0x00, + __SGX_EGETKEY = 0x01, + 
__SGX_EENTER = 0x02, + __SGX_ERESUME = 0x03, + __SGX_EEXIT = 0x04, + __SGX_EACCEPT = 0x05, + __SGX_EMODPE = 0x06, + __SGX_EACCEPTCOPY = 0x07 + }; + enum __enclu_type __T = (enum __enclu_type) __L; + unsigned int __R = 0; + if (!__builtin_constant_p (__T)) + __enclu_generic (__L, __D[0], __D[1], __D[2], __R); + else switch (__T) + { + case __SGX_EREPORT: + case __SGX_EACCEPTCOPY: + __enclu_bcd (__L, __D[0], __D[1], __D[2], __R); + break; + case __SGX_EGETKEY: + case __SGX_ERESUME: + case __SGX_EACCEPT: + case __SGX_EMODPE: + __enclu_bc (__L, __D[0], __D[1], __R); + break; + case __SGX_EENTER: + __enclu_eenter (__L, __D[0], __D[1], __R); + break; + case __SGX_EEXIT: + __enclu_eexit (__L, __D[0], __D[1], __R); + break; + default: + __enclu_generic (__L, __D[0], __D[1], __D[2], __R); + } + return __R; +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_enclv_u32 (const unsigned int __L, size_t __D[]) +{ + enum __enclv_type + { + __SGX_EDECVIRTCHILD = 0x00, + __SGX_EINCVIRTCHILD = 0x01, + __SGX_ESETCONTEXT = 0x02 + }; + unsigned int __R = 0; + if (!__builtin_constant_p (__L)) + __enclv_generic (__L, __D[0], __D[1], __D[2], __R); + else switch (__L) + { + case __SGX_EDECVIRTCHILD: + case __SGX_EINCVIRTCHILD: + __enclv_bc (__L, __D[0], __D[1], __R); + break; + case __SGX_ESETCONTEXT: + __enclv_cd (__L, __D[1], __D[2], __R); + break; + default: + __enclv_generic (__L, __D[0], __D[1], __D[2], __R); + } + return __R; +} + +#ifdef __DISABLE_SGX__ +#undef __DISABLE_SGX__ +#pragma GCC pop_options +#endif /* __DISABLE_SGX__ */ + +#endif /* _SGXINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/shaintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/shaintrin.h new file mode 100644 index 0000000..7230850 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/shaintrin.h @@ -0,0 +1,98 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _SHAINTRIN_H_INCLUDED +#define _SHAINTRIN_H_INCLUDED + +#ifndef __SHA__ +#pragma GCC push_options +#pragma GCC target("sha") +#define __DISABLE_SHA__ +#endif /* __SHA__ */ + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha1msg1_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_sha1msg1 ((__v4si) __A, (__v4si) __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha1msg2_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_sha1msg2 ((__v4si) __A, (__v4si) __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha1nexte_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_sha1nexte ((__v4si) __A, (__v4si) __B); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha1rnds4_epu32 (__m128i __A, __m128i __B, const int __I) +{ + 
return (__m128i) __builtin_ia32_sha1rnds4 ((__v4si) __A, (__v4si) __B, __I); +} +#else +#define _mm_sha1rnds4_epu32(A, B, I) \ + ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(I))) +#endif + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha256msg1_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_sha256msg1 ((__v4si) __A, (__v4si) __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha256msg2_epu32 (__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_sha256msg2 ((__v4si) __A, (__v4si) __B); +} + +extern __inline __m128i +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha256rnds2_epu32 (__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_sha256rnds2 ((__v4si) __A, (__v4si) __B, + (__v4si) __C); +} + +#ifdef __DISABLE_SHA__ +#undef __DISABLE_SHA__ +#pragma GCC pop_options +#endif /* __DISABLE_SHA__ */ + +#endif /* _SHAINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/smmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/smmintrin.h new file mode 100644 index 0000000..eb6a451 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/smmintrin.h @@ -0,0 +1,851 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.0. */ + +#ifndef _SMMINTRIN_H_INCLUDED +#define _SMMINTRIN_H_INCLUDED + +/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header + files. */ +#include + +#ifndef __SSE4_1__ +#pragma GCC push_options +#pragma GCC target("sse4.1") +#define __DISABLE_SSE4_1__ +#endif /* __SSE4_1__ */ + +/* Rounding mode macros. */ +#define _MM_FROUND_TO_NEAREST_INT 0x00 +#define _MM_FROUND_TO_NEG_INF 0x01 +#define _MM_FROUND_TO_POS_INF 0x02 +#define _MM_FROUND_TO_ZERO 0x03 +#define _MM_FROUND_CUR_DIRECTION 0x04 + +#define _MM_FROUND_RAISE_EXC 0x00 +#define _MM_FROUND_NO_EXC 0x08 + +#define _MM_FROUND_NINT \ + (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_FLOOR \ + (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_CEIL \ + (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_TRUNC \ + (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_RINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC) +#define _MM_FROUND_NEARBYINT \ + (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC) + +/* Test Instruction */ +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & __M) == 0. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testz_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & ~__M) == 0. 
*/ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V); +} + +/* Packed integer 128-bit bitwise comparison. Return 1 if + (__V & __M) != 0 && (__V & ~__M) != 0. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_testnzc_si128 (__m128i __M, __m128i __V) +{ + return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V); +} + +/* Macros for packed integer 128-bit comparison intrinsics. */ +#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V)) + +#define _mm_test_all_ones(V) \ + _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V))) + +#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V)) + +/* Packed/scalar double precision floating point rounding. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_pd (__m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_sd(__m128d __D, __m128d __V, const int __M) +{ + return (__m128d) __builtin_ia32_roundsd ((__v2df)__D, + (__v2df)__V, + __M); +} +#else +#define _mm_round_pd(V, M) \ + ((__m128d) __builtin_ia32_roundpd ((__v2df)(__m128d)(V), (int)(M))) + +#define _mm_round_sd(D, V, M) \ + ((__m128d) __builtin_ia32_roundsd ((__v2df)(__m128d)(D), \ + (__v2df)(__m128d)(V), (int)(M))) +#endif + +/* Packed/scalar single precision floating point rounding. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_ps (__m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_round_ss (__m128 __D, __m128 __V, const int __M) +{ + return (__m128) __builtin_ia32_roundss ((__v4sf)__D, + (__v4sf)__V, + __M); +} +#else +#define _mm_round_ps(V, M) \ + ((__m128) __builtin_ia32_roundps ((__v4sf)(__m128)(V), (int)(M))) + +#define _mm_round_ss(D, V, M) \ + ((__m128) __builtin_ia32_roundss ((__v4sf)(__m128)(D), \ + (__v4sf)(__m128)(V), (int)(M))) +#endif + +/* Macros for ceil/floor intrinsics. */ +#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL) +#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR) +#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR) + +#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL) +#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL) + +#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR) +#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR) + +/* SSE4.1 */ + +/* Integer blend instructions - select data from 2 sources using + constant/variable mask. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_epi16 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__X, + (__v8hi)__Y, + __M); +} +#else +#define _mm_blend_epi16(X, Y, M) \ + ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(X), \ + (__v8hi)(__m128i)(Y), (int)(M))) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blendv_epi8 (__m128i __X, __m128i __Y, __m128i __M) +{ + return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__X, + (__v16qi)__Y, + (__v16qi)__M); +} + +/* Single precision floating point blend instructions - select data + from 2 sources using constant/variable mask. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_blendps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} +#else +#define _mm_blend_ps(X, Y, M) \ + ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(M))) +#endif + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blendv_ps (__m128 __X, __m128 __Y, __m128 __M) +{ + return (__m128) __builtin_ia32_blendvps ((__v4sf)__X, + (__v4sf)__Y, + (__v4sf)__M); +} + +/* Double precision floating point blend instructions - select data + from 2 sources using constant/variable mask. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blend_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_blendpd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_blend_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(M))) +#endif + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_blendv_pd (__m128d __X, __m128d __Y, __m128d __M) +{ + return (__m128d) __builtin_ia32_blendvpd ((__v2df)__X, + (__v2df)__Y, + (__v2df)__M); +} + +/* Dot product instructions with mask-defined summing and zeroing parts + of result. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dp_ps (__m128 __X, __m128 __Y, const int __M) +{ + return (__m128) __builtin_ia32_dpps ((__v4sf)__X, + (__v4sf)__Y, + __M); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_dp_pd (__m128d __X, __m128d __Y, const int __M) +{ + return (__m128d) __builtin_ia32_dppd ((__v2df)__X, + (__v2df)__Y, + __M); +} +#else +#define _mm_dp_ps(X, Y, M) \ + ((__m128) __builtin_ia32_dpps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(M))) + +#define _mm_dp_pd(X, Y, M) \ + ((__m128d) __builtin_ia32_dppd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(M))) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) ((__v2di)__X == (__v2di)__Y); +} + +/* Min/max packed integer instructions. 
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminsd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pminud128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_epu32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaxud128 ((__v4si)__X, (__v4si)__Y); +} + +/* Packed integer 32-bit multiplication with truncation of upper + halves of results. 
*/ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mullo_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) ((__v4su)__X * (__v4su)__Y); +} + +/* Packed integer 32-bit multiplication of 2 pairs of operands + with two 64-bit results. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y); +} + +/* Insert single precision float into packed single precision array + element selected by index N. The bits [7-6] of N define S + index, the bits [5-4] define D index, and bits [3-0] define + zeroing mask for D. */ + +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_ps (__m128 __D, __m128 __S, const int __N) +{ + return (__m128) __builtin_ia32_insertps128 ((__v4sf)__D, + (__v4sf)__S, + __N); +} +#else +#define _mm_insert_ps(D, S, N) \ + ((__m128) __builtin_ia32_insertps128 ((__v4sf)(__m128)(D), \ + (__v4sf)(__m128)(S), (int)(N))) +#endif + +/* Helper macro to create the N value for _mm_insert_ps. */ +#define _MM_MK_INSERTPS_NDX(S, D, M) (((S) << 6) | ((D) << 4) | (M)) + +/* Extract binary representation of single precision float from packed + single precision array element of X selected by index N. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_ps (__m128 __X, const int __N) +{ + union { int i; float f; } __tmp; + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)__X, __N); + return __tmp.i; +} +#else +#define _mm_extract_ps(X, N) \ + (__extension__ \ + ({ \ + union { int i; float f; } __tmp; \ + __tmp.f = __builtin_ia32_vec_ext_v4sf ((__v4sf)(__m128)(X), (int)(N)); \ + __tmp.i; \ + })) +#endif + +/* Extract binary representation of single precision float into + D from packed single precision array element of S selected + by index N. */ +#define _MM_EXTRACT_FLOAT(D, S, N) \ + { (D) = __builtin_ia32_vec_ext_v4sf ((__v4sf)(S), (N)); } + +/* Extract specified single precision float element into the lower + part of __m128. */ +#define _MM_PICK_OUT_PS(X, N) \ + _mm_insert_ps (_mm_setzero_ps (), (X), \ + _MM_MK_INSERTPS_NDX ((N), 0, 0x0e)) + +/* Insert integer, S, into packed integer array element of D + selected by index N. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi8 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)__D, + __S, __N); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi32 (__m128i __D, int __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v4si ((__v4si)__D, + __S, __N); +} + +#ifdef __x86_64__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_epi64 (__m128i __D, long long __S, const int __N) +{ + return (__m128i) __builtin_ia32_vec_set_v2di ((__v2di)__D, + __S, __N); +} +#endif +#else +#define _mm_insert_epi8(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v16qi ((__v16qi)(__m128i)(D), \ + (int)(S), (int)(N))) + +#define _mm_insert_epi32(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v4si ((__v4si)(__m128i)(D), \ + (int)(S), (int)(N))) + +#ifdef __x86_64__ +#define _mm_insert_epi64(D, S, N) \ + ((__m128i) __builtin_ia32_vec_set_v2di ((__v2di)(__m128i)(D), \ + (long long)(S), (int)(N))) +#endif +#endif + +/* Extract integer from packed integer array element of X selected by + index N. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi8 (__m128i __X, const int __N) +{ + return (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)__X, __N); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi32 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v4si ((__v4si)__X, __N); +} + +#ifdef __x86_64__ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_epi64 (__m128i __X, const int __N) +{ + return __builtin_ia32_vec_ext_v2di ((__v2di)__X, __N); +} +#endif +#else +#define _mm_extract_epi8(X, N) \ + ((int) (unsigned char) __builtin_ia32_vec_ext_v16qi ((__v16qi)(__m128i)(X), (int)(N))) +#define _mm_extract_epi32(X, N) \ + ((int) __builtin_ia32_vec_ext_v4si ((__v4si)(__m128i)(X), (int)(N))) + +#ifdef __x86_64__ +#define _mm_extract_epi64(X, N) \ + ((long long) __builtin_ia32_vec_ext_v2di ((__v2di)(__m128i)(X), (int)(N))) +#endif +#endif + +/* Return horizontal packed word minimum and its index in bits [15:0] + and bits [18:16] respectively. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_minpos_epu16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X); +} + +/* Packed integer sign-extension. 
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbd128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwd128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbq128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxdq128 ((__v4si)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxwq128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepi8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovsxbw128 ((__v16qi)__X); +} + +/* Packed integer zero-extension. 
*/ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu8_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbd128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu16_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwd128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu8_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbq128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu32_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxdq128 ((__v4si)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu16_epi64 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxwq128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtepu8_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pmovzxbw128 ((__v16qi)__X); +} + +/* Pack 8 double words from 2 operands into 8 words of result with + unsigned saturation. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_packus_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_packusdw128 ((__v4si)__X, (__v4si)__Y); +} + +/* Sum absolute 8-bit integer difference of adjacent groups of 4 + byte integers in the first 2 operands. Starting offsets within + operands are determined by the 3rd mask operand. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mpsadbw_epu8 (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)__X, + (__v16qi)__Y, __M); +} +#else +#define _mm_mpsadbw_epu8(X, Y, M) \ + ((__m128i) __builtin_ia32_mpsadbw128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#endif + +/* Load double quadword using non-temporal aligned hint. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_load_si128 (__m128i *__X) +{ + return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __X); +} + +#ifndef __SSE4_2__ +#pragma GCC push_options +#pragma GCC target("sse4.2") +#define __DISABLE_SSE4_2__ +#endif /* __SSE4_2__ */ + +/* These macros specify the source data format. */ +#define _SIDD_UBYTE_OPS 0x00 +#define _SIDD_UWORD_OPS 0x01 +#define _SIDD_SBYTE_OPS 0x02 +#define _SIDD_SWORD_OPS 0x03 + +/* These macros specify the comparison operation. */ +#define _SIDD_CMP_EQUAL_ANY 0x00 +#define _SIDD_CMP_RANGES 0x04 +#define _SIDD_CMP_EQUAL_EACH 0x08 +#define _SIDD_CMP_EQUAL_ORDERED 0x0c + +/* These macros specify the polarity. */ +#define _SIDD_POSITIVE_POLARITY 0x00 +#define _SIDD_NEGATIVE_POLARITY 0x10 +#define _SIDD_MASKED_POSITIVE_POLARITY 0x20 +#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30 + +/* These macros specify the output selection in _mm_cmpXstri (). */ +#define _SIDD_LEAST_SIGNIFICANT 0x00 +#define _SIDD_MOST_SIGNIFICANT 0x40 + +/* These macros specify the output selection in _mm_cmpXstrm (). */ +#define _SIDD_BIT_MASK 0x00 +#define _SIDD_UNIT_MASK 0x40 + +/* Intrinsics for text/string processing. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrm (__m128i __X, __m128i __Y, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistri (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistri128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrm (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return (__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestri (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestri128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistrm(X, Y, M) \ + ((__m128i) __builtin_ia32_pcmpistrm128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistri(X, Y, M) \ + ((int) __builtin_ia32_pcmpistri128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) + +#define _mm_cmpestrm(X, LX, Y, LY, M) \ + ((__m128i) __builtin_ia32_pcmpestrm128 ((__v16qi)(__m128i)(X), \ + (int)(LX), (__v16qi)(__m128i)(Y), \ + (int)(LY), (int)(M))) +#define _mm_cmpestri(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestri128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#endif + +/* Intrinsics for text/string processing and reading values of + EFlags. 
*/ + +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistra (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistria128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrc (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistric128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistro (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistrio128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrs (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistris128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpistrz (__m128i __X, __m128i __Y, const int __M) +{ + return __builtin_ia32_pcmpistriz128 ((__v16qi)__X, + (__v16qi)__Y, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestra (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestria128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrc (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestric128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestro (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestrio128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrs (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestris128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpestrz (__m128i __X, int __LX, __m128i __Y, int __LY, const int __M) +{ + return __builtin_ia32_pcmpestriz128 ((__v16qi)__X, __LX, + (__v16qi)__Y, __LY, + __M); +} +#else +#define _mm_cmpistra(X, Y, M) \ + ((int) __builtin_ia32_pcmpistria128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistrc(X, Y, M) \ + ((int) __builtin_ia32_pcmpistric128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistro(X, Y, M) \ + ((int) __builtin_ia32_pcmpistrio128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistrs(X, Y, M) \ + ((int) __builtin_ia32_pcmpistris128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) +#define _mm_cmpistrz(X, Y, M) \ + ((int) __builtin_ia32_pcmpistriz128 ((__v16qi)(__m128i)(X), \ + (__v16qi)(__m128i)(Y), (int)(M))) + +#define _mm_cmpestra(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestria128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestrc(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestric128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestro(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestrio128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestrs(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestris128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + (int)(M))) +#define _mm_cmpestrz(X, LX, Y, LY, M) \ + ((int) __builtin_ia32_pcmpestriz128 ((__v16qi)(__m128i)(X), (int)(LX), \ + (__v16qi)(__m128i)(Y), (int)(LY), \ + 
(int)(M))) +#endif + +/* Packed integer 64-bit comparison, zeroing or filling with ones + corresponding parts of result. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_epi64 (__m128i __X, __m128i __Y) +{ + return (__m128i) ((__v2di)__X > (__v2di)__Y); +} + +#ifdef __DISABLE_SSE4_2__ +#undef __DISABLE_SSE4_2__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE4_2__ */ + +#ifdef __DISABLE_SSE4_1__ +#undef __DISABLE_SSE4_1__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE4_1__ */ + +#include + +#ifndef __CRC32__ +#pragma GCC push_options +#pragma GCC target("crc32") +#define __DISABLE_CRC32__ +#endif /* __CRC32__ */ + +/* Accumulate CRC32 (polynomial 0x11EDC6F41) value. */ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u8 (unsigned int __C, unsigned char __V) +{ + return __builtin_ia32_crc32qi (__C, __V); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u16 (unsigned int __C, unsigned short __V) +{ + return __builtin_ia32_crc32hi (__C, __V); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u32 (unsigned int __C, unsigned int __V) +{ + return __builtin_ia32_crc32si (__C, __V); +} + +#ifdef __x86_64__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_crc32_u64 (unsigned long long __C, unsigned long long __V) +{ + return __builtin_ia32_crc32di (__C, __V); +} +#endif + +#ifdef __DISABLE_CRC32__ +#undef __DISABLE_CRC32__ +#pragma GCC pop_options +#endif /* __DISABLE_CRC32__ */ + +#endif /* _SMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdalign.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdalign.h new file mode 100644 index 0000000..b10cad1 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdalign.h @@ -0,0 +1,39 @@ +/* 
Copyright (C) 2011-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO C1X: 7.15 Alignment . */ + +#ifndef _STDALIGN_H +#define _STDALIGN_H + +#ifndef __cplusplus + +#define alignas _Alignas +#define alignof _Alignof + +#define __alignas_is_defined 1 +#define __alignof_is_defined 1 + +#endif + +#endif /* stdalign.h */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdarg.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdarg.h new file mode 100644 index 0000000..7545ed3 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdarg.h @@ -0,0 +1,127 @@ +/* Copyright (C) 1989-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.15 Variable arguments + */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +typedef __builtin_va_list __gnuc_va_list; +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. */ +#ifdef _STDARG_H + +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L \ + || __cplusplus + 0 >= 201103L +#define va_copy(d,s) __builtin_va_copy(d,s) +#endif +#define __va_copy(d,s) __builtin_va_copy(d,s) + +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST)) +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. 
*/ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#ifdef _SCO_DS +#define __VA_LIST +#endif +#endif /* _VA_LIST_ */ +#else /* not __svr4__ || _SCO_DS */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. */ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +/* The macro __va_list__ is used by BeOS. */ +#ifndef __va_list__ +typedef __gnuc_va_list va_list; +#endif /* not __va_list__ */ +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif +#ifndef __va_list__ +#define __va_list__ +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdatomic.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdatomic.h new file mode 100644 index 0000000..bfcfdf6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdatomic.h @@ -0,0 +1,243 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. 
+ +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO C11 Standard: 7.17 Atomics . */ + +#ifndef _STDATOMIC_H +#define _STDATOMIC_H + +typedef enum + { + memory_order_relaxed = __ATOMIC_RELAXED, + memory_order_consume = __ATOMIC_CONSUME, + memory_order_acquire = __ATOMIC_ACQUIRE, + memory_order_release = __ATOMIC_RELEASE, + memory_order_acq_rel = __ATOMIC_ACQ_REL, + memory_order_seq_cst = __ATOMIC_SEQ_CST + } memory_order; + + +typedef _Atomic _Bool atomic_bool; +typedef _Atomic char atomic_char; +typedef _Atomic signed char atomic_schar; +typedef _Atomic unsigned char atomic_uchar; +typedef _Atomic short atomic_short; +typedef _Atomic unsigned short atomic_ushort; +typedef _Atomic int atomic_int; +typedef _Atomic unsigned int atomic_uint; +typedef _Atomic long atomic_long; +typedef _Atomic unsigned long atomic_ulong; +typedef _Atomic long long atomic_llong; +typedef _Atomic unsigned long long atomic_ullong; +typedef _Atomic __CHAR16_TYPE__ atomic_char16_t; +typedef _Atomic __CHAR32_TYPE__ atomic_char32_t; +typedef _Atomic __WCHAR_TYPE__ atomic_wchar_t; +typedef _Atomic __INT_LEAST8_TYPE__ atomic_int_least8_t; +typedef _Atomic 
__UINT_LEAST8_TYPE__ atomic_uint_least8_t; +typedef _Atomic __INT_LEAST16_TYPE__ atomic_int_least16_t; +typedef _Atomic __UINT_LEAST16_TYPE__ atomic_uint_least16_t; +typedef _Atomic __INT_LEAST32_TYPE__ atomic_int_least32_t; +typedef _Atomic __UINT_LEAST32_TYPE__ atomic_uint_least32_t; +typedef _Atomic __INT_LEAST64_TYPE__ atomic_int_least64_t; +typedef _Atomic __UINT_LEAST64_TYPE__ atomic_uint_least64_t; +typedef _Atomic __INT_FAST8_TYPE__ atomic_int_fast8_t; +typedef _Atomic __UINT_FAST8_TYPE__ atomic_uint_fast8_t; +typedef _Atomic __INT_FAST16_TYPE__ atomic_int_fast16_t; +typedef _Atomic __UINT_FAST16_TYPE__ atomic_uint_fast16_t; +typedef _Atomic __INT_FAST32_TYPE__ atomic_int_fast32_t; +typedef _Atomic __UINT_FAST32_TYPE__ atomic_uint_fast32_t; +typedef _Atomic __INT_FAST64_TYPE__ atomic_int_fast64_t; +typedef _Atomic __UINT_FAST64_TYPE__ atomic_uint_fast64_t; +typedef _Atomic __INTPTR_TYPE__ atomic_intptr_t; +typedef _Atomic __UINTPTR_TYPE__ atomic_uintptr_t; +typedef _Atomic __SIZE_TYPE__ atomic_size_t; +typedef _Atomic __PTRDIFF_TYPE__ atomic_ptrdiff_t; +typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t; +typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t; + + +#define ATOMIC_VAR_INIT(VALUE) (VALUE) + +/* Initialize an atomic object pointed to by PTR with VAL. 
*/ +#define atomic_init(PTR, VAL) \ + atomic_store_explicit (PTR, VAL, __ATOMIC_RELAXED) + +#define kill_dependency(Y) \ + __extension__ \ + ({ \ + __auto_type __kill_dependency_tmp = (Y); \ + __kill_dependency_tmp; \ + }) + +extern void atomic_thread_fence (memory_order); +#define atomic_thread_fence(MO) __atomic_thread_fence (MO) +extern void atomic_signal_fence (memory_order); +#define atomic_signal_fence(MO) __atomic_signal_fence (MO) +#define atomic_is_lock_free(OBJ) __atomic_is_lock_free (sizeof (*(OBJ)), (OBJ)) + +#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE +#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE +#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE +#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE +#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE +#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE +#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE +#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE +#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE +#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE + + +/* Note that these macros require __auto_type to remove + _Atomic qualifiers (and const qualifiers, if those are valid on + macro operands). + + Also note that the header file uses the generic form of __atomic + builtins, which requires the address to be taken of the value + parameter, and then we pass that value on. This allows the macros + to work for any type, and the compiler is smart enough to convert + these to lock-free _N variants if possible, and throw away the + temps. 
*/ + +#define atomic_store_explicit(PTR, VAL, MO) \ + __extension__ \ + ({ \ + __auto_type __atomic_store_ptr = (PTR); \ + __typeof__ ((void)0, *__atomic_store_ptr) __atomic_store_tmp = (VAL); \ + __atomic_store (__atomic_store_ptr, &__atomic_store_tmp, (MO)); \ + }) + +#define atomic_store(PTR, VAL) \ + atomic_store_explicit (PTR, VAL, __ATOMIC_SEQ_CST) + + +#define atomic_load_explicit(PTR, MO) \ + __extension__ \ + ({ \ + __auto_type __atomic_load_ptr = (PTR); \ + __typeof__ ((void)0, *__atomic_load_ptr) __atomic_load_tmp; \ + __atomic_load (__atomic_load_ptr, &__atomic_load_tmp, (MO)); \ + __atomic_load_tmp; \ + }) + +#define atomic_load(PTR) atomic_load_explicit (PTR, __ATOMIC_SEQ_CST) + + +#define atomic_exchange_explicit(PTR, VAL, MO) \ + __extension__ \ + ({ \ + __auto_type __atomic_exchange_ptr = (PTR); \ + __typeof__ ((void)0, *__atomic_exchange_ptr) __atomic_exchange_val = (VAL); \ + __typeof__ ((void)0, *__atomic_exchange_ptr) __atomic_exchange_tmp; \ + __atomic_exchange (__atomic_exchange_ptr, &__atomic_exchange_val, \ + &__atomic_exchange_tmp, (MO)); \ + __atomic_exchange_tmp; \ + }) + +#define atomic_exchange(PTR, VAL) \ + atomic_exchange_explicit (PTR, VAL, __ATOMIC_SEQ_CST) + + +#define atomic_compare_exchange_strong_explicit(PTR, VAL, DES, SUC, FAIL) \ + __extension__ \ + ({ \ + __auto_type __atomic_compare_exchange_ptr = (PTR); \ + __typeof__ ((void)0, *__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \ + = (DES); \ + __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \ + &__atomic_compare_exchange_tmp, 0, \ + (SUC), (FAIL)); \ + }) + +#define atomic_compare_exchange_strong(PTR, VAL, DES) \ + atomic_compare_exchange_strong_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \ + __ATOMIC_SEQ_CST) + +#define atomic_compare_exchange_weak_explicit(PTR, VAL, DES, SUC, FAIL) \ + __extension__ \ + ({ \ + __auto_type __atomic_compare_exchange_ptr = (PTR); \ + __typeof__ ((void)0, *__atomic_compare_exchange_ptr) 
__atomic_compare_exchange_tmp \ + = (DES); \ + __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \ + &__atomic_compare_exchange_tmp, 1, \ + (SUC), (FAIL)); \ + }) + +#define atomic_compare_exchange_weak(PTR, VAL, DES) \ + atomic_compare_exchange_weak_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \ + __ATOMIC_SEQ_CST) + + + +#define atomic_fetch_add(PTR, VAL) __atomic_fetch_add ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_add_explicit(PTR, VAL, MO) \ + __atomic_fetch_add ((PTR), (VAL), (MO)) + +#define atomic_fetch_sub(PTR, VAL) __atomic_fetch_sub ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_explicit(PTR, VAL, MO) \ + __atomic_fetch_sub ((PTR), (VAL), (MO)) + +#define atomic_fetch_or(PTR, VAL) __atomic_fetch_or ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_or_explicit(PTR, VAL, MO) \ + __atomic_fetch_or ((PTR), (VAL), (MO)) + +#define atomic_fetch_xor(PTR, VAL) __atomic_fetch_xor ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_explicit(PTR, VAL, MO) \ + __atomic_fetch_xor ((PTR), (VAL), (MO)) + +#define atomic_fetch_and(PTR, VAL) __atomic_fetch_and ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_and_explicit(PTR, VAL, MO) \ + __atomic_fetch_and ((PTR), (VAL), (MO)) + + +typedef _Atomic struct +{ +#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1 + _Bool __val; +#else + unsigned char __val; +#endif +} atomic_flag; + +#define ATOMIC_FLAG_INIT { 0 } + + +extern _Bool atomic_flag_test_and_set (volatile atomic_flag *); +#define atomic_flag_test_and_set(PTR) \ + __atomic_test_and_set ((PTR), __ATOMIC_SEQ_CST) +extern _Bool atomic_flag_test_and_set_explicit (volatile atomic_flag *, + memory_order); +#define atomic_flag_test_and_set_explicit(PTR, MO) \ + __atomic_test_and_set ((PTR), (MO)) + +extern void atomic_flag_clear (volatile atomic_flag *); +#define atomic_flag_clear(PTR) __atomic_clear ((PTR), __ATOMIC_SEQ_CST) +extern void atomic_flag_clear_explicit (volatile atomic_flag *, memory_order); 
+#define atomic_flag_clear_explicit(PTR, MO) __atomic_clear ((PTR), (MO)) + +#endif /* _STDATOMIC_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdbool.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdbool.h new file mode 100644 index 0000000..fe500d9 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdbool.h @@ -0,0 +1,52 @@ +/* Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.16 Boolean type and values + */ + +#ifndef _STDBOOL_H +#define _STDBOOL_H + +#ifndef __cplusplus + +#define bool _Bool +#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L +#define true ((_Bool)+1u) +#define false ((_Bool)+0u) +#else +#define true 1 +#define false 0 +#endif + +#else /* __cplusplus */ + +/* Supporting _Bool in C++ is a GCC extension. */ +#define _Bool bool + +#endif /* __cplusplus */ + +/* Signal that all the definitions are present. 
*/ +#define __bool_true_false_are_defined 1 + +#endif /* stdbool.h */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stddef.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stddef.h new file mode 100644 index 0000000..79e296d --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stddef.h @@ -0,0 +1,449 @@ +/* Copyright (C) 1989-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.17 Common definitions + */ +#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \ + && !defined(__STDDEF_H__)) \ + || defined(__need_wchar_t) || defined(__need_size_t) \ + || defined(__need_ptrdiff_t) || defined(__need_NULL) \ + || defined(__need_wint_t) + +/* Any one of these symbols __need_* means that GNU libc + wants us just to define one data type. So don't define + the symbols that indicate this file's entire job has been done. */ +#if (!defined(__need_wchar_t) && !defined(__need_size_t) \ + && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \ + && !defined(__need_wint_t)) +#define _STDDEF_H +#define _STDDEF_H_ +/* snaroff@next.com says the NeXT needs this. 
*/ +#define _ANSI_STDDEF_H +#endif + +#ifndef __sys_stdtypes_h +/* This avoids lossage on SunOS but only if stdtypes.h comes first. + There's no way to win with the other order! Sun lossage. */ + +#if defined(__NetBSD__) +#include +#endif + +#if defined (__FreeBSD__) +#include +#endif + +#if defined(__NetBSD__) +#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_) +#define _SIZE_T +#endif +#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_) +#define _PTRDIFF_T +#endif +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_. */ +#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_) +#ifndef _BSD_WCHAR_T_ +#define _WCHAR_T +#endif +#endif +/* Undef _FOO_T_ if we are supposed to define foo_t. */ +#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_) +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#if defined (__need_size_t) || defined (_STDDEF_H_) +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#if defined (__need_wchar_t) || defined (_STDDEF_H_) +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* defined(__NetBSD__) */ + +/* Sequent's header files use _PTRDIFF_T_ in some conflicting way. + Just ignore it. */ +#if defined (__sequent__) && defined (_PTRDIFF_T_) +#undef _PTRDIFF_T_ +#endif + +/* On VxWorks, may have defined macros like + _TYPE_size_t which will typedef size_t. fixincludes patched the + vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is + not defined, and so that defining this macro defines _GCC_SIZE_T. + If we find that the macros are still defined at this point, we must + invoke them so that the type is defined as expected. 
*/ +#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_)) +_TYPE_ptrdiff_t; +#undef _TYPE_ptrdiff_t +#endif +#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_)) +_TYPE_size_t; +#undef _TYPE_size_t +#endif +#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_)) +_TYPE_wchar_t; +#undef _TYPE_wchar_t +#endif + +/* In case nobody has defined these types, but we aren't running under + GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and + __WCHAR_TYPE__ have reasonable values. This can happen if the + parts of GCC is compiled by an older compiler, that actually + include gstddef.h, such as collect2. */ + +/* Signed type of difference of two pointers. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_ptrdiff_t) +#ifndef _PTRDIFF_T /* in case has defined it. */ +#ifndef _T_PTRDIFF_ +#ifndef _T_PTRDIFF +#ifndef __PTRDIFF_T +#ifndef _PTRDIFF_T_ +#ifndef _BSD_PTRDIFF_T_ +#ifndef ___int_ptrdiff_t_h +#ifndef _GCC_PTRDIFF_T +#ifndef _PTRDIFF_T_DECLARED /* DragonFly */ +#ifndef __DEFINED_ptrdiff_t /* musl libc */ +#define _PTRDIFF_T +#define _T_PTRDIFF_ +#define _T_PTRDIFF +#define __PTRDIFF_T +#define _PTRDIFF_T_ +#define _BSD_PTRDIFF_T_ +#define ___int_ptrdiff_t_h +#define _GCC_PTRDIFF_T +#define _PTRDIFF_T_DECLARED +#define __DEFINED_ptrdiff_t +#ifndef __PTRDIFF_TYPE__ +#define __PTRDIFF_TYPE__ long int +#endif +typedef __PTRDIFF_TYPE__ ptrdiff_t; +#endif /* __DEFINED_ptrdiff_t */ +#endif /* _PTRDIFF_T_DECLARED */ +#endif /* _GCC_PTRDIFF_T */ +#endif /* ___int_ptrdiff_t_h */ +#endif /* _BSD_PTRDIFF_T_ */ +#endif /* _PTRDIFF_T_ */ +#endif /* __PTRDIFF_T */ +#endif /* _T_PTRDIFF */ +#endif /* _T_PTRDIFF_ */ +#endif /* _PTRDIFF_T */ + +/* If this symbol has done its job, get rid of it. */ +#undef __need_ptrdiff_t + +#endif /* _STDDEF_H or __need_ptrdiff_t. 
*/ + +/* Unsigned type of `sizeof' something. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_size_t) +#ifndef __size_t__ /* BeOS */ +#ifndef __SIZE_T__ /* Cray Unicos/Mk */ +#ifndef _SIZE_T /* in case has defined it. */ +#ifndef _SYS_SIZE_T_H +#ifndef _T_SIZE_ +#ifndef _T_SIZE +#ifndef __SIZE_T +#ifndef _SIZE_T_ +#ifndef _BSD_SIZE_T_ +#ifndef _SIZE_T_DEFINED_ +#ifndef _SIZE_T_DEFINED +#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */ +#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */ +#ifndef __DEFINED_size_t /* musl libc */ +#ifndef ___int_size_t_h +#ifndef _GCC_SIZE_T +#ifndef _SIZET_ +#ifndef __size_t +#define __size_t__ /* BeOS */ +#define __SIZE_T__ /* Cray Unicos/Mk */ +#define _SIZE_T +#define _SYS_SIZE_T_H +#define _T_SIZE_ +#define _T_SIZE +#define __SIZE_T +#define _SIZE_T_ +#define _BSD_SIZE_T_ +#define _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED +#define _BSD_SIZE_T_DEFINED_ /* Darwin */ +#define _SIZE_T_DECLARED /* FreeBSD 5 */ +#define __DEFINED_size_t /* musl libc */ +#define ___int_size_t_h +#define _GCC_SIZE_T +#define _SIZET_ +#if defined (__FreeBSD__) \ + || defined(__DragonFly__) \ + || defined(__FreeBSD_kernel__) \ + || defined(__VMS__) +/* __size_t is a typedef, must not trash it. 
*/ +#else +#define __size_t +#endif +#ifndef __SIZE_TYPE__ +#define __SIZE_TYPE__ long unsigned int +#endif +#if !(defined (__GNUG__) && defined (size_t)) +typedef __SIZE_TYPE__ size_t; +#ifdef __BEOS__ +typedef long ssize_t; +#endif /* __BEOS__ */ +#endif /* !(defined (__GNUG__) && defined (size_t)) */ +#endif /* __size_t */ +#endif /* _SIZET_ */ +#endif /* _GCC_SIZE_T */ +#endif /* ___int_size_t_h */ +#endif /* __DEFINED_size_t */ +#endif /* _SIZE_T_DECLARED */ +#endif /* _BSD_SIZE_T_DEFINED_ */ +#endif /* _SIZE_T_DEFINED */ +#endif /* _SIZE_T_DEFINED_ */ +#endif /* _BSD_SIZE_T_ */ +#endif /* _SIZE_T_ */ +#endif /* __SIZE_T */ +#endif /* _T_SIZE */ +#endif /* _T_SIZE_ */ +#endif /* _SYS_SIZE_T_H */ +#endif /* _SIZE_T */ +#endif /* __SIZE_T__ */ +#endif /* __size_t__ */ +#undef __need_size_t +#endif /* _STDDEF_H or __need_size_t. */ + + +/* Wide character type. + Locale-writers should change this as necessary to + be big enough to hold unique values not between 0 and 127, + and not (wchar_t) -1, for each defined multibyte character. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. 
*/ +#if defined (_STDDEF_H) || defined (__need_wchar_t) +#ifndef __wchar_t__ /* BeOS */ +#ifndef __WCHAR_T__ /* Cray Unicos/Mk */ +#ifndef _WCHAR_T +#ifndef _T_WCHAR_ +#ifndef _T_WCHAR +#ifndef __WCHAR_T +#ifndef _WCHAR_T_ +#ifndef _BSD_WCHAR_T_ +#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */ +#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */ +#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */ +#ifndef __DEFINED_wchar_t /* musl libc */ +#ifndef _WCHAR_T_DEFINED_ +#ifndef _WCHAR_T_DEFINED +#ifndef _WCHAR_T_H +#ifndef ___int_wchar_t_h +#ifndef __INT_WCHAR_T_H +#ifndef _GCC_WCHAR_T +#define __wchar_t__ /* BeOS */ +#define __WCHAR_T__ /* Cray Unicos/Mk */ +#define _WCHAR_T +#define _T_WCHAR_ +#define _T_WCHAR +#define __WCHAR_T +#define _WCHAR_T_ +#define _BSD_WCHAR_T_ +#define _WCHAR_T_DEFINED_ +#define _WCHAR_T_DEFINED +#define _WCHAR_T_H +#define ___int_wchar_t_h +#define __INT_WCHAR_T_H +#define _GCC_WCHAR_T +#define _WCHAR_T_DECLARED +#define __DEFINED_wchar_t + +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other + symbols in the _FOO_T_ family, stays defined even after its + corresponding type is defined). If we define wchar_t, then we + must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if + we undef _WCHAR_T_, then we must also define rune_t, since + headers like runetype.h assume that if machine/ansi.h is included, + and _BSD_WCHAR_T_ is not defined, then rune_t is available. + machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of + the same type." */ +#ifdef _BSD_WCHAR_T_ +#undef _BSD_WCHAR_T_ +#ifdef _BSD_RUNE_T_ +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +typedef _BSD_RUNE_T_ rune_t; +#define _BSD_WCHAR_T_DEFINED_ +#define _BSD_RUNE_T_DEFINED_ /* Darwin */ +#if defined (__FreeBSD__) && (__FreeBSD__ < 5) +/* Why is this file so hard to maintain properly? 
In contrast to + the comment above regarding BSD/386 1.1, on FreeBSD for as long + as the symbol has existed, _BSD_RUNE_T_ must not stay defined or + redundant typedefs will occur when stdlib.h is included after this file. */ +#undef _BSD_RUNE_T_ +#endif +#endif +#endif +#endif +/* FreeBSD 5 can't be handled well using "traditional" logic above + since it no longer defines _BSD_RUNE_T_ yet still desires to export + rune_t in some cases... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +#if __BSD_VISIBLE +#ifndef _RUNE_T_DECLARED +typedef __rune_t rune_t; +#define _RUNE_T_DECLARED +#endif +#endif +#endif +#endif + +#ifndef __WCHAR_TYPE__ +#define __WCHAR_TYPE__ int +#endif +#ifndef __cplusplus +typedef __WCHAR_TYPE__ wchar_t; +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* __DEFINED_wchar_t */ +#endif /* _WCHAR_T_DECLARED */ +#endif /* _BSD_RUNE_T_DEFINED_ */ +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* __WCHAR_T__ */ +#endif /* __wchar_t__ */ +#undef __need_wchar_t +#endif /* _STDDEF_H or __need_wchar_t. */ + +#if defined (__need_wint_t) +#ifndef _WINT_T +#define _WINT_T + +#ifndef __WINT_TYPE__ +#define __WINT_TYPE__ unsigned int +#endif +typedef __WINT_TYPE__ wint_t; +#endif +#undef __need_wint_t +#endif + +#if defined(__NetBSD__) +/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_ + are probably typos and should be removed before 2.8 is released. */ +#ifdef _GCC_PTRDIFF_T_ +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T_ +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T_ +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +/* The following ones are the real ones. 
*/ +#ifdef _GCC_PTRDIFF_T +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* __NetBSD__ */ + +#endif /* __sys_stdtypes_h */ + +/* A null pointer constant. */ + +#if defined (_STDDEF_H) || defined (__need_NULL) +#undef NULL /* in case has defined it. */ +#ifdef __GNUG__ +#define NULL __null +#else /* G++ */ +#ifndef __cplusplus +#define NULL ((void *)0) +#else /* C++ */ +#define NULL 0 +#endif /* C++ */ +#endif /* G++ */ +#endif /* NULL not defined and or need NULL. */ +#undef __need_NULL + +#ifdef _STDDEF_H + +/* Offset of member MEMBER in a struct of type TYPE. */ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) + +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \ + || (defined(__cplusplus) && __cplusplus >= 201103L) +#ifndef _GCC_MAX_ALIGN_T +#define _GCC_MAX_ALIGN_T +/* Type whose alignment is supported in every context and is at least + as great as that of any standard type not using alignment + specifiers. */ +typedef struct { + long long __max_align_ll __attribute__((__aligned__(__alignof__(long long)))); + long double __max_align_ld __attribute__((__aligned__(__alignof__(long double)))); + /* _Float128 is defined as a basic type, so max_align_t must be + sufficiently aligned for it. This code must work in C++, so we + use __float128 here; that is only available on some + architectures, but only on i386 is extra alignment needed for + __float128. */ +#ifdef __i386__ + __float128 __max_align_f128 __attribute__((__aligned__(__alignof(__float128)))); +#endif +} max_align_t; +#endif +#endif /* C11 or C++11. */ + +#if defined(__cplusplus) && __cplusplus >= 201103L +#ifndef _GXX_NULLPTR_T +#define _GXX_NULLPTR_T + typedef decltype(nullptr) nullptr_t; +#endif +#endif /* C++11. 
*/ + +#endif /* _STDDEF_H was defined this time */ + +#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__ + || __need_XXX was not defined before */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdfix.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdfix.h new file mode 100644 index 0000000..4795b77 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdfix.h @@ -0,0 +1,204 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO/IEC JTC1 SC22 WG14 N1169 + * Date: 2006-04-04 + * ISO/IEC TR 18037 + * Programming languages - C - Extensions to support embedded processors + */ + +#ifndef _STDFIX_H +#define _STDFIX_H + +/* 7.18a.1 Introduction. */ + +#undef fract +#undef accum +#undef sat +#define fract _Fract +#define accum _Accum +#define sat _Sat + +/* 7.18a.3 Precision macros. 
*/ + +#undef SFRACT_FBIT +#undef SFRACT_MIN +#undef SFRACT_MAX +#undef SFRACT_EPSILON +#define SFRACT_FBIT __SFRACT_FBIT__ +#define SFRACT_MIN __SFRACT_MIN__ +#define SFRACT_MAX __SFRACT_MAX__ +#define SFRACT_EPSILON __SFRACT_EPSILON__ + +#undef USFRACT_FBIT +#undef USFRACT_MIN +#undef USFRACT_MAX +#undef USFRACT_EPSILON +#define USFRACT_FBIT __USFRACT_FBIT__ +#define USFRACT_MIN __USFRACT_MIN__ /* GCC extension. */ +#define USFRACT_MAX __USFRACT_MAX__ +#define USFRACT_EPSILON __USFRACT_EPSILON__ + +#undef FRACT_FBIT +#undef FRACT_MIN +#undef FRACT_MAX +#undef FRACT_EPSILON +#define FRACT_FBIT __FRACT_FBIT__ +#define FRACT_MIN __FRACT_MIN__ +#define FRACT_MAX __FRACT_MAX__ +#define FRACT_EPSILON __FRACT_EPSILON__ + +#undef UFRACT_FBIT +#undef UFRACT_MIN +#undef UFRACT_MAX +#undef UFRACT_EPSILON +#define UFRACT_FBIT __UFRACT_FBIT__ +#define UFRACT_MIN __UFRACT_MIN__ /* GCC extension. */ +#define UFRACT_MAX __UFRACT_MAX__ +#define UFRACT_EPSILON __UFRACT_EPSILON__ + +#undef LFRACT_FBIT +#undef LFRACT_MIN +#undef LFRACT_MAX +#undef LFRACT_EPSILON +#define LFRACT_FBIT __LFRACT_FBIT__ +#define LFRACT_MIN __LFRACT_MIN__ +#define LFRACT_MAX __LFRACT_MAX__ +#define LFRACT_EPSILON __LFRACT_EPSILON__ + +#undef ULFRACT_FBIT +#undef ULFRACT_MIN +#undef ULFRACT_MAX +#undef ULFRACT_EPSILON +#define ULFRACT_FBIT __ULFRACT_FBIT__ +#define ULFRACT_MIN __ULFRACT_MIN__ /* GCC extension. */ +#define ULFRACT_MAX __ULFRACT_MAX__ +#define ULFRACT_EPSILON __ULFRACT_EPSILON__ + +#undef LLFRACT_FBIT +#undef LLFRACT_MIN +#undef LLFRACT_MAX +#undef LLFRACT_EPSILON +#define LLFRACT_FBIT __LLFRACT_FBIT__ /* GCC extension. */ +#define LLFRACT_MIN __LLFRACT_MIN__ /* GCC extension. */ +#define LLFRACT_MAX __LLFRACT_MAX__ /* GCC extension. */ +#define LLFRACT_EPSILON __LLFRACT_EPSILON__ /* GCC extension. */ + +#undef ULLFRACT_FBIT +#undef ULLFRACT_MIN +#undef ULLFRACT_MAX +#undef ULLFRACT_EPSILON +#define ULLFRACT_FBIT __ULLFRACT_FBIT__ /* GCC extension. 
*/ +#define ULLFRACT_MIN __ULLFRACT_MIN__ /* GCC extension. */ +#define ULLFRACT_MAX __ULLFRACT_MAX__ /* GCC extension. */ +#define ULLFRACT_EPSILON __ULLFRACT_EPSILON__ /* GCC extension. */ + +#undef SACCUM_FBIT +#undef SACCUM_IBIT +#undef SACCUM_MIN +#undef SACCUM_MAX +#undef SACCUM_EPSILON +#define SACCUM_FBIT __SACCUM_FBIT__ +#define SACCUM_IBIT __SACCUM_IBIT__ +#define SACCUM_MIN __SACCUM_MIN__ +#define SACCUM_MAX __SACCUM_MAX__ +#define SACCUM_EPSILON __SACCUM_EPSILON__ + +#undef USACCUM_FBIT +#undef USACCUM_IBIT +#undef USACCUM_MIN +#undef USACCUM_MAX +#undef USACCUM_EPSILON +#define USACCUM_FBIT __USACCUM_FBIT__ +#define USACCUM_IBIT __USACCUM_IBIT__ +#define USACCUM_MIN __USACCUM_MIN__ /* GCC extension. */ +#define USACCUM_MAX __USACCUM_MAX__ +#define USACCUM_EPSILON __USACCUM_EPSILON__ + +#undef ACCUM_FBIT +#undef ACCUM_IBIT +#undef ACCUM_MIN +#undef ACCUM_MAX +#undef ACCUM_EPSILON +#define ACCUM_FBIT __ACCUM_FBIT__ +#define ACCUM_IBIT __ACCUM_IBIT__ +#define ACCUM_MIN __ACCUM_MIN__ +#define ACCUM_MAX __ACCUM_MAX__ +#define ACCUM_EPSILON __ACCUM_EPSILON__ + +#undef UACCUM_FBIT +#undef UACCUM_IBIT +#undef UACCUM_MIN +#undef UACCUM_MAX +#undef UACCUM_EPSILON +#define UACCUM_FBIT __UACCUM_FBIT__ +#define UACCUM_IBIT __UACCUM_IBIT__ +#define UACCUM_MIN __UACCUM_MIN__ /* GCC extension. */ +#define UACCUM_MAX __UACCUM_MAX__ +#define UACCUM_EPSILON __UACCUM_EPSILON__ + +#undef LACCUM_FBIT +#undef LACCUM_IBIT +#undef LACCUM_MIN +#undef LACCUM_MAX +#undef LACCUM_EPSILON +#define LACCUM_FBIT __LACCUM_FBIT__ +#define LACCUM_IBIT __LACCUM_IBIT__ +#define LACCUM_MIN __LACCUM_MIN__ +#define LACCUM_MAX __LACCUM_MAX__ +#define LACCUM_EPSILON __LACCUM_EPSILON__ + +#undef ULACCUM_FBIT +#undef ULACCUM_IBIT +#undef ULACCUM_MIN +#undef ULACCUM_MAX +#undef ULACCUM_EPSILON +#define ULACCUM_FBIT __ULACCUM_FBIT__ +#define ULACCUM_IBIT __ULACCUM_IBIT__ +#define ULACCUM_MIN __ULACCUM_MIN__ /* GCC extension. 
*/ +#define ULACCUM_MAX __ULACCUM_MAX__ +#define ULACCUM_EPSILON __ULACCUM_EPSILON__ + +#undef LLACCUM_FBIT +#undef LLACCUM_IBIT +#undef LLACCUM_MIN +#undef LLACCUM_MAX +#undef LLACCUM_EPSILON +#define LLACCUM_FBIT __LLACCUM_FBIT__ /* GCC extension. */ +#define LLACCUM_IBIT __LLACCUM_IBIT__ /* GCC extension. */ +#define LLACCUM_MIN __LLACCUM_MIN__ /* GCC extension. */ +#define LLACCUM_MAX __LLACCUM_MAX__ /* GCC extension. */ +#define LLACCUM_EPSILON __LLACCUM_EPSILON__ /* GCC extension. */ + +#undef ULLACCUM_FBIT +#undef ULLACCUM_IBIT +#undef ULLACCUM_MIN +#undef ULLACCUM_MAX +#undef ULLACCUM_EPSILON +#define ULLACCUM_FBIT __ULLACCUM_FBIT__ /* GCC extension. */ +#define ULLACCUM_IBIT __ULLACCUM_IBIT__ /* GCC extension. */ +#define ULLACCUM_MIN __ULLACCUM_MIN__ /* GCC extension. */ +#define ULLACCUM_MAX __ULLACCUM_MAX__ /* GCC extension. */ +#define ULLACCUM_EPSILON __ULLACCUM_EPSILON__ /* GCC extension. */ + +#endif /* _STDFIX_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdint-gcc.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdint-gcc.h new file mode 100644 index 0000000..6be01ae --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdint-gcc.h @@ -0,0 +1,365 @@ +/* Copyright (C) 2008-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. 
+ +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.18 Integer types + */ + +#ifndef _GCC_STDINT_H +#define _GCC_STDINT_H + +/* 7.8.1.1 Exact-width integer types */ + +#ifdef __INT8_TYPE__ +typedef __INT8_TYPE__ int8_t; +#endif +#ifdef __INT16_TYPE__ +typedef __INT16_TYPE__ int16_t; +#endif +#ifdef __INT32_TYPE__ +typedef __INT32_TYPE__ int32_t; +#endif +#ifdef __INT64_TYPE__ +typedef __INT64_TYPE__ int64_t; +#endif +#ifdef __UINT8_TYPE__ +typedef __UINT8_TYPE__ uint8_t; +#endif +#ifdef __UINT16_TYPE__ +typedef __UINT16_TYPE__ uint16_t; +#endif +#ifdef __UINT32_TYPE__ +typedef __UINT32_TYPE__ uint32_t; +#endif +#ifdef __UINT64_TYPE__ +typedef __UINT64_TYPE__ uint64_t; +#endif + +/* 7.8.1.2 Minimum-width integer types */ + +typedef __INT_LEAST8_TYPE__ int_least8_t; +typedef __INT_LEAST16_TYPE__ int_least16_t; +typedef __INT_LEAST32_TYPE__ int_least32_t; +typedef __INT_LEAST64_TYPE__ int_least64_t; +typedef __UINT_LEAST8_TYPE__ uint_least8_t; +typedef __UINT_LEAST16_TYPE__ uint_least16_t; +typedef __UINT_LEAST32_TYPE__ uint_least32_t; +typedef __UINT_LEAST64_TYPE__ uint_least64_t; + +/* 7.8.1.3 Fastest minimum-width integer types */ + +typedef __INT_FAST8_TYPE__ int_fast8_t; +typedef __INT_FAST16_TYPE__ int_fast16_t; +typedef __INT_FAST32_TYPE__ int_fast32_t; +typedef __INT_FAST64_TYPE__ int_fast64_t; +typedef __UINT_FAST8_TYPE__ uint_fast8_t; +typedef __UINT_FAST16_TYPE__ uint_fast16_t; +typedef __UINT_FAST32_TYPE__ uint_fast32_t; +typedef __UINT_FAST64_TYPE__ uint_fast64_t; + +/* 7.8.1.4 Integer types capable of holding object pointers */ + +#ifdef __INTPTR_TYPE__ +typedef __INTPTR_TYPE__ intptr_t; +#endif +#ifdef __UINTPTR_TYPE__ +typedef __UINTPTR_TYPE__ uintptr_t; +#endif + +/* 7.8.1.5 Greatest-width integer types */ + +typedef __INTMAX_TYPE__ intmax_t; +typedef 
__UINTMAX_TYPE__ uintmax_t; + +#if (!defined __cplusplus || __cplusplus >= 201103L \ + || defined __STDC_LIMIT_MACROS) + +/* 7.18.2 Limits of specified-width integer types */ + +#ifdef __INT8_MAX__ +# undef INT8_MAX +# define INT8_MAX __INT8_MAX__ +# undef INT8_MIN +# define INT8_MIN (-INT8_MAX - 1) +#endif +#ifdef __UINT8_MAX__ +# undef UINT8_MAX +# define UINT8_MAX __UINT8_MAX__ +#endif +#ifdef __INT16_MAX__ +# undef INT16_MAX +# define INT16_MAX __INT16_MAX__ +# undef INT16_MIN +# define INT16_MIN (-INT16_MAX - 1) +#endif +#ifdef __UINT16_MAX__ +# undef UINT16_MAX +# define UINT16_MAX __UINT16_MAX__ +#endif +#ifdef __INT32_MAX__ +# undef INT32_MAX +# define INT32_MAX __INT32_MAX__ +# undef INT32_MIN +# define INT32_MIN (-INT32_MAX - 1) +#endif +#ifdef __UINT32_MAX__ +# undef UINT32_MAX +# define UINT32_MAX __UINT32_MAX__ +#endif +#ifdef __INT64_MAX__ +# undef INT64_MAX +# define INT64_MAX __INT64_MAX__ +# undef INT64_MIN +# define INT64_MIN (-INT64_MAX - 1) +#endif +#ifdef __UINT64_MAX__ +# undef UINT64_MAX +# define UINT64_MAX __UINT64_MAX__ +#endif + +#undef INT_LEAST8_MAX +#define INT_LEAST8_MAX __INT_LEAST8_MAX__ +#undef INT_LEAST8_MIN +#define INT_LEAST8_MIN (-INT_LEAST8_MAX - 1) +#undef UINT_LEAST8_MAX +#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__ +#undef INT_LEAST16_MAX +#define INT_LEAST16_MAX __INT_LEAST16_MAX__ +#undef INT_LEAST16_MIN +#define INT_LEAST16_MIN (-INT_LEAST16_MAX - 1) +#undef UINT_LEAST16_MAX +#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__ +#undef INT_LEAST32_MAX +#define INT_LEAST32_MAX __INT_LEAST32_MAX__ +#undef INT_LEAST32_MIN +#define INT_LEAST32_MIN (-INT_LEAST32_MAX - 1) +#undef UINT_LEAST32_MAX +#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__ +#undef INT_LEAST64_MAX +#define INT_LEAST64_MAX __INT_LEAST64_MAX__ +#undef INT_LEAST64_MIN +#define INT_LEAST64_MIN (-INT_LEAST64_MAX - 1) +#undef UINT_LEAST64_MAX +#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__ + +#undef INT_FAST8_MAX +#define INT_FAST8_MAX __INT_FAST8_MAX__ +#undef 
INT_FAST8_MIN +#define INT_FAST8_MIN (-INT_FAST8_MAX - 1) +#undef UINT_FAST8_MAX +#define UINT_FAST8_MAX __UINT_FAST8_MAX__ +#undef INT_FAST16_MAX +#define INT_FAST16_MAX __INT_FAST16_MAX__ +#undef INT_FAST16_MIN +#define INT_FAST16_MIN (-INT_FAST16_MAX - 1) +#undef UINT_FAST16_MAX +#define UINT_FAST16_MAX __UINT_FAST16_MAX__ +#undef INT_FAST32_MAX +#define INT_FAST32_MAX __INT_FAST32_MAX__ +#undef INT_FAST32_MIN +#define INT_FAST32_MIN (-INT_FAST32_MAX - 1) +#undef UINT_FAST32_MAX +#define UINT_FAST32_MAX __UINT_FAST32_MAX__ +#undef INT_FAST64_MAX +#define INT_FAST64_MAX __INT_FAST64_MAX__ +#undef INT_FAST64_MIN +#define INT_FAST64_MIN (-INT_FAST64_MAX - 1) +#undef UINT_FAST64_MAX +#define UINT_FAST64_MAX __UINT_FAST64_MAX__ + +#ifdef __INTPTR_MAX__ +# undef INTPTR_MAX +# define INTPTR_MAX __INTPTR_MAX__ +# undef INTPTR_MIN +# define INTPTR_MIN (-INTPTR_MAX - 1) +#endif +#ifdef __UINTPTR_MAX__ +# undef UINTPTR_MAX +# define UINTPTR_MAX __UINTPTR_MAX__ +#endif + +#undef INTMAX_MAX +#define INTMAX_MAX __INTMAX_MAX__ +#undef INTMAX_MIN +#define INTMAX_MIN (-INTMAX_MAX - 1) +#undef UINTMAX_MAX +#define UINTMAX_MAX __UINTMAX_MAX__ + +/* 7.18.3 Limits of other integer types */ + +#undef PTRDIFF_MAX +#define PTRDIFF_MAX __PTRDIFF_MAX__ +#undef PTRDIFF_MIN +#define PTRDIFF_MIN (-PTRDIFF_MAX - 1) + +#undef SIG_ATOMIC_MAX +#define SIG_ATOMIC_MAX __SIG_ATOMIC_MAX__ +#undef SIG_ATOMIC_MIN +#define SIG_ATOMIC_MIN __SIG_ATOMIC_MIN__ + +#undef SIZE_MAX +#define SIZE_MAX __SIZE_MAX__ + +#undef WCHAR_MAX +#define WCHAR_MAX __WCHAR_MAX__ +#undef WCHAR_MIN +#define WCHAR_MIN __WCHAR_MIN__ + +#undef WINT_MAX +#define WINT_MAX __WINT_MAX__ +#undef WINT_MIN +#define WINT_MIN __WINT_MIN__ + +#endif /* (!defined __cplusplus || __cplusplus >= 201103L + || defined __STDC_LIMIT_MACROS) */ + +#if (!defined __cplusplus || __cplusplus >= 201103L \ + || defined __STDC_CONSTANT_MACROS) + +#undef INT8_C +#define INT8_C(c) __INT8_C(c) +#undef INT16_C +#define INT16_C(c) __INT16_C(c) +#undef 
INT32_C +#define INT32_C(c) __INT32_C(c) +#undef INT64_C +#define INT64_C(c) __INT64_C(c) +#undef UINT8_C +#define UINT8_C(c) __UINT8_C(c) +#undef UINT16_C +#define UINT16_C(c) __UINT16_C(c) +#undef UINT32_C +#define UINT32_C(c) __UINT32_C(c) +#undef UINT64_C +#define UINT64_C(c) __UINT64_C(c) +#undef INTMAX_C +#define INTMAX_C(c) __INTMAX_C(c) +#undef UINTMAX_C +#define UINTMAX_C(c) __UINTMAX_C(c) + +#endif /* (!defined __cplusplus || __cplusplus >= 201103L + || defined __STDC_CONSTANT_MACROS) */ + +#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \ + || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L)) +/* TS 18661-1 / C2X widths of integer types. */ + +#ifdef __INT8_TYPE__ +# undef INT8_WIDTH +# define INT8_WIDTH 8 +#endif +#ifdef __UINT8_TYPE__ +# undef UINT8_WIDTH +# define UINT8_WIDTH 8 +#endif +#ifdef __INT16_TYPE__ +# undef INT16_WIDTH +# define INT16_WIDTH 16 +#endif +#ifdef __UINT16_TYPE__ +# undef UINT16_WIDTH +# define UINT16_WIDTH 16 +#endif +#ifdef __INT32_TYPE__ +# undef INT32_WIDTH +# define INT32_WIDTH 32 +#endif +#ifdef __UINT32_TYPE__ +# undef UINT32_WIDTH +# define UINT32_WIDTH 32 +#endif +#ifdef __INT64_TYPE__ +# undef INT64_WIDTH +# define INT64_WIDTH 64 +#endif +#ifdef __UINT64_TYPE__ +# undef UINT64_WIDTH +# define UINT64_WIDTH 64 +#endif + +#undef INT_LEAST8_WIDTH +#define INT_LEAST8_WIDTH __INT_LEAST8_WIDTH__ +#undef UINT_LEAST8_WIDTH +#define UINT_LEAST8_WIDTH __INT_LEAST8_WIDTH__ +#undef INT_LEAST16_WIDTH +#define INT_LEAST16_WIDTH __INT_LEAST16_WIDTH__ +#undef UINT_LEAST16_WIDTH +#define UINT_LEAST16_WIDTH __INT_LEAST16_WIDTH__ +#undef INT_LEAST32_WIDTH +#define INT_LEAST32_WIDTH __INT_LEAST32_WIDTH__ +#undef UINT_LEAST32_WIDTH +#define UINT_LEAST32_WIDTH __INT_LEAST32_WIDTH__ +#undef INT_LEAST64_WIDTH +#define INT_LEAST64_WIDTH __INT_LEAST64_WIDTH__ +#undef UINT_LEAST64_WIDTH +#define UINT_LEAST64_WIDTH __INT_LEAST64_WIDTH__ + +#undef INT_FAST8_WIDTH +#define INT_FAST8_WIDTH __INT_FAST8_WIDTH__ +#undef UINT_FAST8_WIDTH +#define 
UINT_FAST8_WIDTH __INT_FAST8_WIDTH__ +#undef INT_FAST16_WIDTH +#define INT_FAST16_WIDTH __INT_FAST16_WIDTH__ +#undef UINT_FAST16_WIDTH +#define UINT_FAST16_WIDTH __INT_FAST16_WIDTH__ +#undef INT_FAST32_WIDTH +#define INT_FAST32_WIDTH __INT_FAST32_WIDTH__ +#undef UINT_FAST32_WIDTH +#define UINT_FAST32_WIDTH __INT_FAST32_WIDTH__ +#undef INT_FAST64_WIDTH +#define INT_FAST64_WIDTH __INT_FAST64_WIDTH__ +#undef UINT_FAST64_WIDTH +#define UINT_FAST64_WIDTH __INT_FAST64_WIDTH__ + +#ifdef __INTPTR_TYPE__ +# undef INTPTR_WIDTH +# define INTPTR_WIDTH __INTPTR_WIDTH__ +#endif +#ifdef __UINTPTR_TYPE__ +# undef UINTPTR_WIDTH +# define UINTPTR_WIDTH __INTPTR_WIDTH__ +#endif + +#undef INTMAX_WIDTH +#define INTMAX_WIDTH __INTMAX_WIDTH__ +#undef UINTMAX_WIDTH +#define UINTMAX_WIDTH __INTMAX_WIDTH__ + +#undef PTRDIFF_WIDTH +#define PTRDIFF_WIDTH __PTRDIFF_WIDTH__ + +#undef SIG_ATOMIC_WIDTH +#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__ + +#undef SIZE_WIDTH +#define SIZE_WIDTH __SIZE_WIDTH__ + +#undef WCHAR_WIDTH +#define WCHAR_WIDTH __WCHAR_WIDTH__ + +#undef WINT_WIDTH +#define WINT_WIDTH __WINT_WIDTH__ + +#endif + +#endif /* _GCC_STDINT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdint.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdint.h new file mode 100644 index 0000000..83b6f70 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdint.h @@ -0,0 +1,14 @@ +#ifndef _GCC_WRAP_STDINT_H +#if __STDC_HOSTED__ +# if defined __cplusplus && __cplusplus >= 201103L +# undef __STDC_LIMIT_MACROS +# define __STDC_LIMIT_MACROS +# undef __STDC_CONSTANT_MACROS +# define __STDC_CONSTANT_MACROS +# endif +# include_next +#else +# include "stdint-gcc.h" +#endif +#define _GCC_WRAP_STDINT_H +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/stdnoreturn.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdnoreturn.h new file mode 100644 index 0000000..efe437e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/stdnoreturn.h @@ -0,0 +1,35 @@ +/* Copyright (C) 
2011-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO C1X: 7.23 _Noreturn . */ + +#ifndef _STDNORETURN_H +#define _STDNORETURN_H + +#ifndef __cplusplus + +#define noreturn _Noreturn + +#endif + +#endif /* stdnoreturn.h */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/tbmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/tbmintrin.h new file mode 100644 index 0000000..f4ceef8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/tbmintrin.h @@ -0,0 +1,180 @@ +/* Copyright (C) 2010-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _TBMINTRIN_H_INCLUDED +#define _TBMINTRIN_H_INCLUDED + +#ifndef __TBM__ +#pragma GCC push_options +#pragma GCC target("tbm") +#define __DISABLE_TBM__ +#endif /* __TBM__ */ + +#ifdef __OPTIMIZE__ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextri_u32 (unsigned int __X, const unsigned int __I) +{ + return __builtin_ia32_bextri_u32 (__X, __I); +} +#else +#define __bextri_u32(X, I) \ + ((unsigned int)__builtin_ia32_bextri_u32 ((unsigned int)(X), \ + (unsigned int)(I))) +#endif /*__OPTIMIZE__ */ + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcfill_u32 (unsigned int __X) +{ + return __X & (__X + 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blci_u32 (unsigned int __X) +{ + return __X | ~(__X + 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcic_u32 (unsigned int __X) +{ + return ~__X & (__X + 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcmsk_u32 (unsigned int __X) +{ + return __X ^ (__X + 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcs_u32 (unsigned int __X) +{ + return __X | (__X + 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsfill_u32 
(unsigned int __X) +{ + return __X | (__X - 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsic_u32 (unsigned int __X) +{ + return ~__X | (__X - 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__t1mskc_u32 (unsigned int __X) +{ + return ~__X | (__X + 1); +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzmsk_u32 (unsigned int __X) +{ + return ~__X & (__X - 1); +} + + + +#ifdef __x86_64__ +#ifdef __OPTIMIZE__ +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__bextri_u64 (unsigned long long __X, const unsigned int __I) +{ + return __builtin_ia32_bextri_u64 (__X, __I); +} +#else +#define __bextri_u64(X, I) \ + ((unsigned long long)__builtin_ia32_bextri_u64 ((unsigned long long)(X), \ + (unsigned long long)(I))) +#endif /*__OPTIMIZE__ */ + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcfill_u64 (unsigned long long __X) +{ + return __X & (__X + 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blci_u64 (unsigned long long __X) +{ + return __X | ~(__X + 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcic_u64 (unsigned long long __X) +{ + return ~__X & (__X + 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcmsk_u64 (unsigned long long __X) +{ + return __X ^ (__X + 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blcs_u64 (unsigned long long __X) +{ + return __X | (__X + 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsfill_u64 (unsigned long long __X) 
+{ + return __X | (__X - 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__blsic_u64 (unsigned long long __X) +{ + return ~__X | (__X - 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__t1mskc_u64 (unsigned long long __X) +{ + return ~__X | (__X + 1); +} + +extern __inline unsigned long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +__tzmsk_u64 (unsigned long long __X) +{ + return ~__X & (__X - 1); +} + + +#endif /* __x86_64__ */ + +#ifdef __DISABLE_TBM__ +#undef __DISABLE_TBM__ +#pragma GCC pop_options +#endif /* __DISABLE_TBM__ */ + +#endif /* _TBMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/tmmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/tmmintrin.h new file mode 100644 index 0000000..aac7a76 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/tmmintrin.h @@ -0,0 +1,249 @@ +/* Copyright (C) 2006-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.1. */ + +#ifndef _TMMINTRIN_H_INCLUDED +#define _TMMINTRIN_H_INCLUDED + +/* We need definitions from the SSE3, SSE2 and SSE header files*/ +#include + +#ifndef __SSSE3__ +#pragma GCC push_options +#pragma GCC target("ssse3") +#define __DISABLE_SSSE3__ +#endif /* __SSSE3__ */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadds_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phaddsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadd_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddd ((__v2si)__X, (__v2si)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hadds_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phaddsw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_epi32 (__m128i __X, __m128i 
__Y) +{ + return (__m128i) __builtin_ia32_phsubd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_phsubsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsub_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubd ((__v2si)__X, (__v2si)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_phsubsw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddubs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmaddubsw128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddubs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmaddubsw ((__v8qi)__X, (__v8qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhrs_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmulhrsw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhrs_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pmulhrsw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pshufb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern 
__inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_pshufb ((__v8qi)__X, (__v8qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi8 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignb128 ((__v16qi)__X, (__v16qi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi16 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignw128 ((__v8hi)__X, (__v8hi)__Y); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_epi32 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_psignd128 ((__v4si)__X, (__v4si)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi8 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignb ((__v8qi)__X, (__v8qi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi16 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignw ((__v4hi)__X, (__v4hi)__Y); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sign_pi32 (__m64 __X, __m64 __Y) +{ + return (__m64) __builtin_ia32_psignd ((__v2si)__X, (__v2si)__Y); +} + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_alignr_epi8(__m128i __X, __m128i __Y, const int __N) +{ + return (__m128i) __builtin_ia32_palignr128 ((__v2di)__X, + (__v2di)__Y, __N * 8); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_alignr_pi8(__m64 __X, __m64 __Y, const int __N) +{ + return (__m64) __builtin_ia32_palignr ((__v1di)__X, + (__v1di)__Y, __N * 8); +} +#else +#define _mm_alignr_epi8(X, Y, N) \ + ((__m128i) 
__builtin_ia32_palignr128 ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), \ + (int)(N) * 8)) +#define _mm_alignr_pi8(X, Y, N) \ + ((__m64) __builtin_ia32_palignr ((__v1di)(__m64)(X), \ + (__v1di)(__m64)(Y), \ + (int)(N) * 8)) +#endif + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_epi8 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsb128 ((__v16qi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_epi16 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsw128 ((__v8hi)__X); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_epi32 (__m128i __X) +{ + return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_pi8 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsb ((__v8qi)__X); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_pi16 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsw ((__v4hi)__X); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_abs_pi32 (__m64 __X) +{ + return (__m64) __builtin_ia32_pabsd ((__v2si)__X); +} + +#ifdef __DISABLE_SSSE3__ +#undef __DISABLE_SSSE3__ +#pragma GCC pop_options +#endif /* __DISABLE_SSSE3__ */ + +#endif /* _TMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/tsxldtrkintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/tsxldtrkintrin.h new file mode 100644 index 0000000..14730c5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/tsxldtrkintrin.h @@ -0,0 +1,56 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _TSXLDTRKINTRIN_H_INCLUDED +#define _TSXLDTRKINTRIN_H_INCLUDED + +#if !defined(__TSXLDTRK__) +#pragma GCC push_options +#pragma GCC target("tsxldtrk") +#define __DISABLE_TSXLDTRK__ +#endif /* __TSXLDTRK__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsusldtrk (void) +{ + __builtin_ia32_xsusldtrk (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xresldtrk (void) +{ + __builtin_ia32_xresldtrk (); +} + +#ifdef __DISABLE_TSXLDTRK__ +#undef __DISABLE_TSXLDTRK__ +#pragma GCC pop_options +#endif /* __DISABLE_TSXLDTRK__ */ + +#endif /* _TSXLDTRKINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/uintrintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/uintrintrin.h new file mode 100644 index 0000000..7c0ee42 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/uintrintrin.h @@ -0,0 +1,84 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _UINTRNTRIN_H_INCLUDED +#define _UINTRNTRIN_H_INCLUDED + +#ifdef __x86_64__ + +#ifndef __UINTR__ +#pragma GCC push_options +#pragma GCC target ("uintr") +#define __DISABLE_UINTR__ +#endif /* __UINTR__ */ + +struct __uintr_frame +{ + /* RIP of the interrupted user process. */ + unsigned long long rip; + /* RFLAGS of the interrupted user process. */ + unsigned long long rflags; + /* RSP of the interrupted user process. 
*/ + unsigned long long rsp; +}; + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_clui (void) +{ + __builtin_ia32_clui (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_stui (void) +{ + __builtin_ia32_stui (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_senduipi (unsigned long long __R) +{ + __builtin_ia32_senduipi (__R); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_testui (void) +{ + return __builtin_ia32_testui (); +} + +#ifdef __DISABLE_UINTR__ +#undef __DISABLE_UINTR__ +#pragma GCC pop_options +#endif /* __DISABLE_UINTR__ */ + +#endif + +#endif /* _UINTRNTRIN_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/unwind.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/unwind.h new file mode 100644 index 0000000..a87c9b3 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/unwind.h @@ -0,0 +1,297 @@ +/* Exception handling and frame unwind runtime interface routines. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* This is derived from the C++ ABI for IA-64. Where we diverge + for cross-architecture compatibility are noted with "@@@". */ + +#ifndef _UNWIND_H +#define _UNWIND_H + +#if defined (__SEH__) && !defined (__USING_SJLJ_EXCEPTIONS__) +/* Only for _GCC_specific_handler. */ +#include +#endif + +#ifndef HIDE_EXPORTS +#pragma GCC visibility push(default) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Level 1: Base ABI */ + +/* @@@ The IA-64 ABI uses uint64 throughout. Most places this is + inefficient for 32-bit and smaller machines. */ +typedef unsigned _Unwind_Word __attribute__((__mode__(__unwind_word__))); +typedef signed _Unwind_Sword __attribute__((__mode__(__unwind_word__))); +#if defined(__ia64__) && defined(__hpux__) +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__word__))); +#else +typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); +#endif +typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); + +/* @@@ The IA-64 ABI uses a 64-bit word to identify the producer and + consumer of an exception. We'll go along with this for now even on + 32-bit machines. We'll need to provide some other option for + 16-bit machines and for machines with > 8 bits per byte. */ +typedef unsigned _Unwind_Exception_Class __attribute__((__mode__(__DI__))); + +/* The unwind interface uses reason codes in several contexts to + identify the reasons for failures or other actions. 
*/ +typedef enum +{ + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8 +} _Unwind_Reason_Code; + + +/* The unwind interface uses a pointer to an exception header object + as its representation of an exception being thrown. In general, the + full representation of an exception object is language- and + implementation-specific, but it will be prefixed by a header + understood by the unwind interface. */ + +struct _Unwind_Exception; + +typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code, + struct _Unwind_Exception *); + +struct _Unwind_Exception +{ + _Unwind_Exception_Class exception_class; + _Unwind_Exception_Cleanup_Fn exception_cleanup; + +#if !defined (__USING_SJLJ_EXCEPTIONS__) && defined (__SEH__) + _Unwind_Word private_[6]; +#else + _Unwind_Word private_1; + _Unwind_Word private_2; +#endif + + /* @@@ The IA-64 ABI says that this structure must be double-word aligned. + Taking that literally does not make much sense generically. Instead we + provide the maximum alignment required by any type for the machine. */ +} __attribute__((__aligned__)); + + +/* The ACTIONS argument to the personality routine is a bitwise OR of one + or more of the following constants. */ +typedef int _Unwind_Action; + +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 +#define _UA_END_OF_STACK 16 + +/* The target can override this macro to define any back-end-specific + attributes required for the lowest-level stack frame. */ +#ifndef LIBGCC2_UNWIND_ATTRIBUTE +#define LIBGCC2_UNWIND_ATTRIBUTE +#endif + +/* This is an opaque type used to refer to a system-specific data + structure used by the system unwinder. 
This context is created and + destroyed by the system, and passed to the personality routine + during unwinding. */ +struct _Unwind_Context; + +/* Raise an exception, passing along the given exception object. */ +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_RaiseException (struct _Unwind_Exception *); + +/* Raise an exception for forced unwinding. */ + +typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_ForcedUnwind (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); + +/* Helper to invoke the exception_cleanup routine. */ +extern void _Unwind_DeleteException (struct _Unwind_Exception *); + +/* Resume propagation of an existing exception. This is used after + e.g. executing cleanup code, and not to implement rethrowing. */ +extern void LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_Resume (struct _Unwind_Exception *); + +/* @@@ Resume propagation of a FORCE_UNWIND exception, or to rethrow + a normal exception that was handled. */ +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ Use unwind data to perform a stack backtrace. The trace callback + is called for every stack frame in the call chain, but no cleanup + actions are performed. */ +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) + (struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_Backtrace (_Unwind_Trace_Fn, void *); + +/* These functions are used for communicating information about the unwind + context (i.e. the unwind descriptors and the user register state) between + the unwind library and the personality routine and landing pad. Only + selected registers may be manipulated. 
*/ + +extern _Unwind_Word _Unwind_GetGR (struct _Unwind_Context *, int); +extern void _Unwind_SetGR (struct _Unwind_Context *, int, _Unwind_Word); + +extern _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *); +extern void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr); + +/* @@@ Retrieve the CFA of the given context. */ +extern _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); + +extern void *_Unwind_GetLanguageSpecificData (struct _Unwind_Context *); + +extern _Unwind_Ptr _Unwind_GetRegionStart (struct _Unwind_Context *); + + +/* The personality routine is the function in the C++ (or other language) + runtime library which serves as an interface between the system unwind + library and language-specific exception handling semantics. It is + specific to the code fragment described by an unwind info block, and + it is always referenced via the pointer in the unwind info block, and + hence it has no ABI-specified name. + + Note that this implies that two different C++ implementations can + use different names, and have different contents in the language + specific data area. Moreover, that the language specific data + area contains no version info because name of the function invoked + provides more effective versioning by detecting at link time the + lack of code to handle the different data format. */ + +typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + struct _Unwind_Exception *, struct _Unwind_Context *); + +/* @@@ The following alternate entry points are for setjmp/longjmp + based unwinding. 
*/ + +struct SjLj_Function_Context; +extern void _Unwind_SjLj_Register (struct SjLj_Function_Context *); +extern void _Unwind_SjLj_Unregister (struct SjLj_Function_Context *); + +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_RaiseException (struct _Unwind_Exception *); +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_ForcedUnwind (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); +extern void LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_Resume (struct _Unwind_Exception *); +extern _Unwind_Reason_Code LIBGCC2_UNWIND_ATTRIBUTE +_Unwind_SjLj_Resume_or_Rethrow (struct _Unwind_Exception *); + +/* @@@ The following provide access to the base addresses for text + and data-relative addressing in the LDSA. In order to stay link + compatible with the standard ABI for IA-64, we inline these. */ + +#ifdef __ia64__ +static inline _Unwind_Ptr +_Unwind_GetDataRelBase (struct _Unwind_Context *_C) +{ + /* The GP is stored in R1. */ + return _Unwind_GetGR (_C, 1); +} + +static inline _Unwind_Ptr +_Unwind_GetTextRelBase (struct _Unwind_Context *_C __attribute__ ((__unused__))) +{ + __builtin_abort (); + return 0; +} + +/* @@@ Retrieve the Backing Store Pointer of the given context. */ +extern _Unwind_Word _Unwind_GetBSP (struct _Unwind_Context *); +#else +extern _Unwind_Ptr _Unwind_GetDataRelBase (struct _Unwind_Context *); +extern _Unwind_Ptr _Unwind_GetTextRelBase (struct _Unwind_Context *); +#endif + +/* @@@ Given an address, return the entry point of the function that + contains it. */ +extern void * _Unwind_FindEnclosingFunction (void *pc); + +#ifndef __SIZEOF_LONG__ + #error "__SIZEOF_LONG__ macro not defined" +#endif + +#ifndef __SIZEOF_POINTER__ + #error "__SIZEOF_POINTER__ macro not defined" +#endif + + +/* leb128 type numbers have a potentially unlimited size. + The target of the following definitions of _sleb128_t and _uleb128_t + is to have efficient data types large enough to hold the leb128 type + numbers used in the unwind code. 
+ Mostly these types will simply be defined to long and unsigned long + except when a unsigned long data type on the target machine is not + capable of storing a pointer. */ + +#if __SIZEOF_LONG__ >= __SIZEOF_POINTER__ + typedef long _sleb128_t; + typedef unsigned long _uleb128_t; +#elif __SIZEOF_LONG_LONG__ >= __SIZEOF_POINTER__ + typedef long long _sleb128_t; + typedef unsigned long long _uleb128_t; +#else +# error "What type shall we use for _sleb128_t?" +#endif + +#if defined (__SEH__) && !defined (__USING_SJLJ_EXCEPTIONS__) +/* Handles the mapping from SEH to GCC interfaces. */ +EXCEPTION_DISPOSITION _GCC_specific_handler (PEXCEPTION_RECORD, void *, + PCONTEXT, PDISPATCHER_CONTEXT, + _Unwind_Personality_Fn); +#endif + +#ifdef __cplusplus +} +#endif + +#ifndef HIDE_EXPORTS +#pragma GCC visibility pop +#endif + +/* Additional actions to unwind number of stack frames. */ +#define _Unwind_Frames_Extra(frames) + +/* Increment frame count. */ +#define _Unwind_Frames_Increment(context, frames) frames++ + +#endif /* unwind.h */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/vaesintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/vaesintrin.h new file mode 100644 index 0000000..11aa660 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/vaesintrin.h @@ -0,0 +1,111 @@ +/* Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef __VAESINTRIN_H_INCLUDED +#define __VAESINTRIN_H_INCLUDED + +#if !defined(__VAES__) || !defined(__AVX__) +#pragma GCC push_options +#pragma GCC target("vaes,avx") +#define __DISABLE_VAES__ +#endif /* __VAES__ */ + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_aesdec_epi128 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vaesdec_v32qi ((__v32qi) __A, (__v32qi) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_aesdeclast_epi128 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vaesdeclast_v32qi ((__v32qi) __A, + (__v32qi) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_aesenc_epi128 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vaesenc_v32qi ((__v32qi) __A, (__v32qi) __B); +} + +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_aesenclast_epi128 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vaesenclast_v32qi ((__v32qi) __A, + (__v32qi) __B); +} + +#ifdef __DISABLE_VAES__ +#undef __DISABLE_VAES__ +#pragma GCC pop_options +#endif /* __DISABLE_VAES__ */ + + +#if !defined(__VAES__) || !defined(__AVX512F__) +#pragma GCC push_options +#pragma GCC target("vaes,avx512f") +#define __DISABLE_VAESF__ +#endif /* __VAES__ */ + + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_aesdec_epi128 (__m512i __A, __m512i __B) +{ + return 
(__m512i)__builtin_ia32_vaesdec_v64qi ((__v64qi) __A, (__v64qi) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_aesdeclast_epi128 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vaesdeclast_v64qi ((__v64qi) __A, + (__v64qi) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_aesenc_epi128 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vaesenc_v64qi ((__v64qi) __A, (__v64qi) __B); +} + +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_aesenclast_epi128 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vaesenclast_v64qi ((__v64qi) __A, + (__v64qi) __B); +} + +#ifdef __DISABLE_VAESF__ +#undef __DISABLE_VAESF__ +#pragma GCC pop_options +#endif /* __DISABLE_VAES__ */ + +#endif /* __VAESINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/varargs.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/varargs.h new file mode 100644 index 0000000..4b9803e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/varargs.h @@ -0,0 +1,7 @@ +#ifndef _VARARGS_H +#define _VARARGS_H + +#error "GCC no longer implements ." +#error "Revise your code to use ." + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/vpclmulqdqintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/vpclmulqdqintrin.h new file mode 100644 index 0000000..a0fe3ee --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/vpclmulqdqintrin.h @@ -0,0 +1,81 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _IMMINTRIN_H_INCLUDED +#error "Never use directly; include instead." +#endif + +#ifndef _VPCLMULQDQINTRIN_H_INCLUDED +#define _VPCLMULQDQINTRIN_H_INCLUDED + +#if !defined(__VPCLMULQDQ__) || !defined(__AVX512F__) +#pragma GCC push_options +#pragma GCC target("vpclmulqdq,avx512f") +#define __DISABLE_VPCLMULQDQF__ +#endif /* __VPCLMULQDQF__ */ + +#ifdef __OPTIMIZE__ +extern __inline __m512i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm512_clmulepi64_epi128 (__m512i __A, __m512i __B, const int __C) +{ + return (__m512i) __builtin_ia32_vpclmulqdq_v8di ((__v8di)__A, + (__v8di) __B, __C); +} +#else +#define _mm512_clmulepi64_epi128(A, B, C) \ + ((__m512i) __builtin_ia32_vpclmulqdq_v8di ((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(C))) +#endif + +#ifdef __DISABLE_VPCLMULQDQF__ +#undef __DISABLE_VPCLMULQDQF__ +#pragma GCC pop_options +#endif /* __DISABLE_VPCLMULQDQF__ */ + +#if !defined(__VPCLMULQDQ__) || !defined(__AVX__) +#pragma GCC push_options +#pragma GCC target("vpclmulqdq,avx") +#define __DISABLE_VPCLMULQDQ__ +#endif /* __VPCLMULQDQ__ */ + +#ifdef __OPTIMIZE__ +extern __inline __m256i +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_clmulepi64_epi128 (__m256i __A, __m256i __B, const int __C) +{ + return (__m256i) __builtin_ia32_vpclmulqdq_v4di ((__v4di)__A, + 
(__v4di) __B, __C); +} +#else +#define _mm256_clmulepi64_epi128(A, B, C) \ + ((__m256i) __builtin_ia32_vpclmulqdq_v4di ((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(C))) +#endif + +#ifdef __DISABLE_VPCLMULQDQ__ +#undef __DISABLE_VPCLMULQDQ__ +#pragma GCC pop_options +#endif /* __DISABLE_VPCLMULQDQ__ */ + +#endif /* _VPCLMULQDQINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/waitpkgintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/waitpkgintrin.h new file mode 100644 index 0000000..25c0c89 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/waitpkgintrin.h @@ -0,0 +1,63 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _WAITPKG_H_INCLUDED +#define _WAITPKG_H_INCLUDED + +#ifndef __WAITPKG__ +#pragma GCC push_options +#pragma GCC target("waitpkg") +#define __DISABLE_WAITPKG__ +#endif /* __WAITPKG__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_umonitor (void *__A) +{ + __builtin_ia32_umonitor (__A); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_umwait (unsigned int __A, unsigned long long __B) +{ + return __builtin_ia32_umwait (__A, __B); +} + +extern __inline unsigned char +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_tpause (unsigned int __A, unsigned long long __B) +{ + return __builtin_ia32_tpause (__A, __B); +} + +#ifdef __DISABLE_WAITPKG__ +#undef __DISABLE_WAITPKG__ +#pragma GCC pop_options +#endif /* __DISABLE_WAITPKG__ */ + +#endif /* _WAITPKG_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/wbnoinvdintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/wbnoinvdintrin.h new file mode 100644 index 0000000..d010aee --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/wbnoinvdintrin.h @@ -0,0 +1,49 @@ +/* Copyright (C) 2018-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _WBNOINVDINTRIN_H_INCLUDED +#define _WBNOINVDINTRIN_H_INCLUDED + +#ifndef __WBNOINVD__ +#pragma GCC push_options +#pragma GCC target("wbnoinvd") +#define __DISABLE_WBNOINVD__ +#endif /* __WBNOINVD__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wbnoinvd (void) +{ + __builtin_ia32_wbnoinvd (); +} + +#ifdef __DISABLE_WBNOINVD__ +#undef __DISABLE_WBNOINVD__ +#pragma GCC pop_options +#endif /* __DISABLE_WBNOINVD__ */ + +#endif /* _WBNOINVDINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/wmmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/wmmintrin.h new file mode 100644 index 0000000..9e987b3 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/wmmintrin.h @@ -0,0 +1,132 @@ +/* Copyright (C) 2008-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 10.1. */ + +#ifndef _WMMINTRIN_H_INCLUDED +#define _WMMINTRIN_H_INCLUDED + +/* We need definitions from the SSE2 header file. */ +#include + +/* AES */ + +#if !defined(__AES__) || !defined(__SSE2__) +#pragma GCC push_options +#pragma GCC target("aes,sse2") +#define __DISABLE_AES__ +#endif /* __AES__ */ + +/* Performs 1 round of AES decryption of the first m128i using + the second m128i as a round key. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdec_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesdec128 ((__v2di)__X, (__v2di)__Y); +} + +/* Performs the last round of AES decryption of the first m128i + using the second m128i as a round key. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesdeclast_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesdeclast128 ((__v2di)__X, + (__v2di)__Y); +} + +/* Performs 1 round of AES encryption of the first m128i using + the second m128i as a round key. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesenc_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesenc128 ((__v2di)__X, (__v2di)__Y); +} + +/* Performs the last round of AES encryption of the first m128i + using the second m128i as a round key. 
*/ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesenclast_si128 (__m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_aesenclast128 ((__v2di)__X, (__v2di)__Y); +} + +/* Performs the InverseMixColumn operation on the source m128i + and stores the result into m128i destination. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aesimc_si128 (__m128i __X) +{ + return (__m128i) __builtin_ia32_aesimc128 ((__v2di)__X); +} + +/* Generates a m128i round key for the input m128i AES cipher key and + byte round constant. The second parameter must be a compile time + constant. */ +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_aeskeygenassist_si128 (__m128i __X, const int __C) +{ + return (__m128i) __builtin_ia32_aeskeygenassist128 ((__v2di)__X, __C); +} +#else +#define _mm_aeskeygenassist_si128(X, C) \ + ((__m128i) __builtin_ia32_aeskeygenassist128 ((__v2di)(__m128i)(X), \ + (int)(C))) +#endif + +#ifdef __DISABLE_AES__ +#undef __DISABLE_AES__ +#pragma GCC pop_options +#endif /* __DISABLE_AES__ */ + +/* PCLMUL */ + +#if !defined(__PCLMUL__) || !defined(__SSE2__) +#pragma GCC push_options +#pragma GCC target("pclmul,sse2") +#define __DISABLE_PCLMUL__ +#endif /* __PCLMUL__ */ + +/* Performs carry-less integer multiplication of 64-bit halves of + 128-bit input operands. The third parameter inducates which 64-bit + haves of the input parameters v1 and v2 should be used. It must be + a compile time constant. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_clmulepi64_si128 (__m128i __X, __m128i __Y, const int __I) +{ + return (__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)__X, + (__v2di)__Y, __I); +} +#else +#define _mm_clmulepi64_si128(X, Y, I) \ + ((__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (int)(I))) +#endif + +#ifdef __DISABLE_PCLMUL__ +#undef __DISABLE_PCLMUL__ +#pragma GCC pop_options +#endif /* __DISABLE_PCLMUL__ */ + +#endif /* _WMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/x86gprintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/x86gprintrin.h new file mode 100644 index 0000000..e0be01d --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/x86gprintrin.h @@ -0,0 +1,269 @@ +/* Copyright (C) 2020-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _X86GPRINTRIN_H_INCLUDED +#define _X86GPRINTRIN_H_INCLUDED + +#if !defined _SOFT_FLOAT || defined __MMX__ || defined __SSE__ +#pragma GCC push_options +#pragma GCC target("general-regs-only") +#define __DISABLE_GENERAL_REGS_ONLY__ +#endif + +#include + +#ifndef __iamcu__ + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_wbinvd (void) +{ + __builtin_ia32_wbinvd (); +} + +#ifndef __RDRND__ +#pragma GCC push_options +#pragma GCC target("rdrnd") +#define __DISABLE_RDRND__ +#endif /* __RDRND__ */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdrand16_step (unsigned short *__P) +{ + return __builtin_ia32_rdrand16_step (__P); +} + +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdrand32_step (unsigned int *__P) +{ + return __builtin_ia32_rdrand32_step (__P); +} +#ifdef __DISABLE_RDRND__ +#undef __DISABLE_RDRND__ +#pragma GCC pop_options +#endif /* __DISABLE_RDRND__ */ + +#ifndef __RDPID__ +#pragma GCC push_options +#pragma GCC target("rdpid") +#define __DISABLE_RDPID__ +#endif /* __RDPID__ */ +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdpid_u32 (void) +{ + return __builtin_ia32_rdpid (); +} +#ifdef __DISABLE_RDPID__ +#undef __DISABLE_RDPID__ +#pragma GCC pop_options +#endif /* __DISABLE_RDPID__ */ + +#ifdef __x86_64__ + +#ifndef __FSGSBASE__ +#pragma GCC push_options +#pragma GCC target("fsgsbase") +#define __DISABLE_FSGSBASE__ +#endif /* __FSGSBASE__ */ +extern __inline 
unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readfsbase_u32 (void) +{ + return __builtin_ia32_rdfsbase32 (); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readfsbase_u64 (void) +{ + return __builtin_ia32_rdfsbase64 (); +} + +extern __inline unsigned int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readgsbase_u32 (void) +{ + return __builtin_ia32_rdgsbase32 (); +} + +extern __inline unsigned long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_readgsbase_u64 (void) +{ + return __builtin_ia32_rdgsbase64 (); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writefsbase_u32 (unsigned int __B) +{ + __builtin_ia32_wrfsbase32 (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writefsbase_u64 (unsigned long long __B) +{ + __builtin_ia32_wrfsbase64 (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writegsbase_u32 (unsigned int __B) +{ + __builtin_ia32_wrgsbase32 (__B); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_writegsbase_u64 (unsigned long long __B) +{ + __builtin_ia32_wrgsbase64 (__B); +} +#ifdef __DISABLE_FSGSBASE__ +#undef __DISABLE_FSGSBASE__ +#pragma GCC pop_options +#endif /* __DISABLE_FSGSBASE__ */ + +#ifndef __RDRND__ +#pragma GCC push_options +#pragma GCC target("rdrnd") +#define __DISABLE_RDRND__ +#endif /* __RDRND__ */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_rdrand64_step (unsigned long long *__P) +{ + return __builtin_ia32_rdrand64_step (__P); +} +#ifdef __DISABLE_RDRND__ +#undef __DISABLE_RDRND__ +#pragma GCC pop_options +#endif /* __DISABLE_RDRND__ */ + +#endif /* __x86_64__ */ + +#ifndef __PTWRITE__ +#pragma GCC push_options +#pragma GCC 
target("ptwrite") +#define __DISABLE_PTWRITE__ +#endif + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_ptwrite64 (unsigned long long __B) +{ + __builtin_ia32_ptwrite64 (__B); +} +#endif /* __x86_64__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_ptwrite32 (unsigned __B) +{ + __builtin_ia32_ptwrite32 (__B); +} +#ifdef __DISABLE_PTWRITE__ +#undef __DISABLE_PTWRITE__ +#pragma GCC pop_options +#endif /* __DISABLE_PTWRITE__ */ + +#endif /* __iamcu__ */ + +#ifdef __DISABLE_GENERAL_REGS_ONLY__ +#undef __DISABLE_GENERAL_REGS_ONLY__ +#pragma GCC pop_options +#endif /* __DISABLE_GENERAL_REGS_ONLY__ */ + +#endif /* _X86GPRINTRIN_H_INCLUDED. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/x86intrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/x86intrin.h new file mode 100644 index 0000000..a070ac7 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/x86intrin.h @@ -0,0 +1,42 @@ +/* Copyright (C) 2008-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _X86INTRIN_H_INCLUDED +#define _X86INTRIN_H_INCLUDED + +#include + +#ifndef __iamcu__ + +/* For including AVX instructions */ +#include + +#include + +#include + +#include + +#endif /* __iamcu__ */ + +#endif /* _X86INTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xmmintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xmmintrin.h new file mode 100644 index 0000000..6265908 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xmmintrin.h @@ -0,0 +1,1334 @@ +/* Copyright (C) 2002-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +/* Implemented from the specification included in the Intel C++ Compiler + User Guide and Reference, version 9.0. */ + +#ifndef _XMMINTRIN_H_INCLUDED +#define _XMMINTRIN_H_INCLUDED + +/* We need type definitions from the MMX header file. */ +#include + +/* Get _mm_malloc () and _mm_free (). */ +#include + +/* Constants for use with _mm_prefetch. */ +enum _mm_hint +{ + /* _MM_HINT_ET is _MM_HINT_T with set 3rd bit. 
*/ + _MM_HINT_ET0 = 7, + _MM_HINT_ET1 = 6, + _MM_HINT_T0 = 3, + _MM_HINT_T1 = 2, + _MM_HINT_T2 = 1, + _MM_HINT_NTA = 0 +}; + +/* Loads one cache line from address P to a location "closer" to the + processor. The selector I specifies the type of prefetch operation. */ +#ifdef __OPTIMIZE__ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_prefetch (const void *__P, enum _mm_hint __I) +{ + __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3); +} +#else +#define _mm_prefetch(P, I) \ + __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3)) +#endif + +#ifndef __SSE__ +#pragma GCC push_options +#pragma GCC target("sse") +#define __DISABLE_SSE__ +#endif /* __SSE__ */ + +/* The Intel API is flexible enough that we must allow aliasing with other + vector types, and their scalar components. */ +typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); + +/* Unaligned version of the same type. */ +typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1))); + +/* Internal data types for implementing the intrinsics. */ +typedef float __v4sf __attribute__ ((__vector_size__ (16))); + +/* Create a selector for use with the SHUFPS instruction. */ +#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \ + (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) + +/* Bits in the MXCSR. 
*/ +#define _MM_EXCEPT_MASK 0x003f +#define _MM_EXCEPT_INVALID 0x0001 +#define _MM_EXCEPT_DENORM 0x0002 +#define _MM_EXCEPT_DIV_ZERO 0x0004 +#define _MM_EXCEPT_OVERFLOW 0x0008 +#define _MM_EXCEPT_UNDERFLOW 0x0010 +#define _MM_EXCEPT_INEXACT 0x0020 + +#define _MM_MASK_MASK 0x1f80 +#define _MM_MASK_INVALID 0x0080 +#define _MM_MASK_DENORM 0x0100 +#define _MM_MASK_DIV_ZERO 0x0200 +#define _MM_MASK_OVERFLOW 0x0400 +#define _MM_MASK_UNDERFLOW 0x0800 +#define _MM_MASK_INEXACT 0x1000 + +#define _MM_ROUND_MASK 0x6000 +#define _MM_ROUND_NEAREST 0x0000 +#define _MM_ROUND_DOWN 0x2000 +#define _MM_ROUND_UP 0x4000 +#define _MM_ROUND_TOWARD_ZERO 0x6000 + +#define _MM_FLUSH_ZERO_MASK 0x8000 +#define _MM_FLUSH_ZERO_ON 0x8000 +#define _MM_FLUSH_ZERO_OFF 0x0000 + +/* Create an undefined vector. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_undefined_ps (void) +{ + __m128 __Y = __Y; + return __Y; +} + +/* Create a vector of zeros. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setzero_ps (void) +{ + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/* Perform the respective operation on the lower SPFP (single-precision + floating-point) values of A and B; the upper three SPFP values are + passed through from A. 
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpss ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ss (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform the respective operation on the four SPFP values in A and B. 
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_add_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A + (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sub_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A - (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mul_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A * (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_div_ps (__m128 __A, __m128 __B) +{ + return (__m128) ((__v4sf)__A / (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rcp_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcpps ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rsqrt_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform logical bit-wise operations on 128-bit values. 
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_and_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_andnot_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_andnps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_or_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_orps (__A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_xor_ps (__m128 __A, __m128 __B) +{ + return __builtin_ia32_xorps (__A, __B); +} + +/* Perform a comparison on the lower SPFP values of A and B. If the + comparison is true, place a mask of all ones in the result, otherwise a + mask of zeros. The upper three SPFP values are passed through from A. */ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss 
((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpless ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnltss ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf) __A, + (__v4sf) + __builtin_ia32_cmpnless ((__v4sf) __B, + (__v4sf) + __A)); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B); +} + +/* Perform a comparison on the four SPFP values of A and B. For each + element, if the comparison is true, place a mask of all ones in the + result, otherwise a mask of zeros. 
*/ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpeq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmplt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmple_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpgt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpge_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpneq_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnlt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnle_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpngt_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpnge_ps (__m128 __A, 
__m128 __B) +{ + return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmpunord_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B); +} + +/* Compare the lower SPFP values of A and B and return 1 if true + and 0 if false. */ + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomieq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomilt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomile_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomigt_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomige_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_ucomineq_ss (__m128 __A, __m128 __B) +{ + return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B); +} + +/* Convert the lower SPFP value to a 32-bit integer according to the current + rounding mode. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si32 (__m128 __A) +{ + return __builtin_ia32_cvtss2si ((__v4sf) __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_ss2si (__m128 __A) +{ + return _mm_cvtss_si32 (__A); +} + +#ifdef __x86_64__ +/* Convert the lower SPFP value to a 32-bit integer according to the + current rounding mode. */ + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si64 (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. 
*/ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_si64x (__m128 __A) +{ + return __builtin_ia32_cvtss2si64 ((__v4sf) __A); +} +#endif + +/* Convert the two lower SPFP values to 32-bit integers according to the + current rounding mode. Return the integers in packed form. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_ps2pi (__m128 __A) +{ + return _mm_cvtps_pi32 (__A); +} + +/* Truncate the lower SPFP value to a 32-bit integer. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si32 (__m128 __A) +{ + return __builtin_ia32_cvttss2si ((__v4sf) __A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_ss2si (__m128 __A) +{ + return _mm_cvttss_si32 (__A); +} + +#ifdef __x86_64__ +/* Truncate the lower SPFP value to a 32-bit integer. */ + +/* Intel intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si64 (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} + +/* Microsoft intrinsic. */ +extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttss_si64x (__m128 __A) +{ + return __builtin_ia32_cvttss2si64 ((__v4sf) __A); +} +#endif + +/* Truncate the two lower SPFP values to 32-bit integers. Return the + integers in packed form. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvttps_pi32 (__m128 __A) +{ + return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtt_ps2pi (__m128 __A) +{ + return _mm_cvttps_pi32 (__A); +} + +/* Convert B to a SPFP value and insert it as element zero in A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi32_ss (__m128 __A, int __B) +{ + return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_si2ss (__m128 __A, int __B) +{ + return _mm_cvtsi32_ss (__A, __B); +} + +#ifdef __x86_64__ +/* Convert B to a SPFP value and insert it as element zero in A. */ + +/* Intel intrinsic. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} + +/* Microsoft intrinsic. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtsi64x_ss (__m128 __A, long long __B) +{ + return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B); +} +#endif + +/* Convert the two 32-bit values in B to SPFP form and insert them + as the two lower elements in A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32_ps (__m128 __A, __m64 __B) +{ + return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvt_pi2ps (__m128 __A, __m64 __B) +{ + return _mm_cvtpi32_ps (__A, __B); +} + +/* Convert the four signed 16-bit values in A to SPFP form. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi16_ps (__m64 __A) +{ + __v4hi __sign; + __v2si __hisi, __losi; + __v4sf __zero, __ra, __rb; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. */ + __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A); + + /* Convert the four words to doublewords. */ + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign); + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign); + + /* Convert the doublewords to floating point two at a time. */ + __zero = (__v4sf) _mm_setzero_ps (); + __ra = __builtin_ia32_cvtpi2ps (__zero, __losi); + __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi); + + return (__m128) __builtin_ia32_movlhps (__ra, __rb); +} + +/* Convert the four unsigned 16-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpu16_ps (__m64 __A) +{ + __v2si __hisi, __losi; + __v4sf __zero, __ra, __rb; + + /* Convert the four words to doublewords. */ + __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL); + __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL); + + /* Convert the doublewords to floating point two at a time. */ + __zero = (__v4sf) _mm_setzero_ps (); + __ra = __builtin_ia32_cvtpi2ps (__zero, __losi); + __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi); + + return (__m128) __builtin_ia32_movlhps (__ra, __rb); +} + +/* Convert the low four signed 8-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi8_ps (__m64 __A) +{ + __v8qi __sign; + + /* This comparison against zero gives us a mask that can be used to + fill in the missing sign bits in the unpack operations below, so + that we get signed values after unpacking. 
*/ + __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A); + + /* Convert the four low bytes to words. */ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign); + + return _mm_cvtpi16_ps(__A); +} + +/* Convert the low four unsigned 8-bit values in A to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpu8_ps(__m64 __A) +{ + __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL); + return _mm_cvtpu16_ps(__A); +} + +/* Convert the four signed 32-bit values in A and B to SPFP form. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtpi32x2_ps(__m64 __A, __m64 __B) +{ + __v4sf __zero = (__v4sf) _mm_setzero_ps (); + __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A); + __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B); + return (__m128) __builtin_ia32_movlhps (__sfa, __sfb); +} + +/* Convert the four SPFP values in A to four signed 16-bit integers. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi16(__m128 __A) +{ + __v4sf __hisf = (__v4sf)__A; + __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf); + __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf); + __v2si __losi = __builtin_ia32_cvtps2pi (__losf); + return (__m64) __builtin_ia32_packssdw (__hisi, __losi); +} + +/* Convert the four SPFP values in A to four signed 8-bit integers. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtps_pi8(__m128 __A) +{ + __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A); + return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL); +} + +/* Selects four specific SPFP values from A and B based on MASK. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask) +{ + return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask); +} +#else +#define _mm_shuffle_ps(A, B, MASK) \ + ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(MASK))) +#endif + +/* Selects and interleaves the upper two SPFP values from A and B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpackhi_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Selects and interleaves the lower two SPFP values from A and B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_unpacklo_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the upper two SPFP values with 64-bits of data loaded from P; + the lower two values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadh_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P); +} + +/* Stores the upper two SPFP values of A into P. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeh_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A); +} + +/* Moves the upper two values of B into the lower two values of A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movehl_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B); +} + +/* Moves the lower two values of B into the upper two values of A. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movelh_ps (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B); +} + +/* Sets the lower two SPFP values with 64-bits of data loaded from P; + the upper two values are passed through from A. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadl_pi (__m128 __A, __m64 const *__P) +{ + return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P); +} + +/* Stores the lower two SPFP values of A into P. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storel_pi (__m64 *__P, __m128 __A) +{ + __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A); +} + +/* Creates a 4-bit mask from the most significant bits of the SPFP values. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_ps (__m128 __A) +{ + return __builtin_ia32_movmskps ((__v4sf)__A); +} + +/* Return the contents of the control register. */ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_getcsr (void) +{ + return __builtin_ia32_stmxcsr (); +} + +/* Read exception bits from the control register. 
*/ +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_EXCEPTION_STATE (void) +{ + return _mm_getcsr() & _MM_EXCEPT_MASK; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_EXCEPTION_MASK (void) +{ + return _mm_getcsr() & _MM_MASK_MASK; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_ROUNDING_MODE (void) +{ + return _mm_getcsr() & _MM_ROUND_MASK; +} + +extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_GET_FLUSH_ZERO_MODE (void) +{ + return _mm_getcsr() & _MM_FLUSH_ZERO_MASK; +} + +/* Set the control register to I. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setcsr (unsigned int __I) +{ + __builtin_ia32_ldmxcsr (__I); +} + +/* Set exception bits in the control register. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_EXCEPTION_STATE(unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_EXCEPTION_MASK (unsigned int __mask) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_ROUNDING_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode) +{ + _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode); +} + +/* Create a vector with element 0 as F and the rest zero. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ss (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f }; +} + +/* Create a vector with all four elements equal to F. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set1_ps (float __F) +{ + return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F }; +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ps1 (float __F) +{ + return _mm_set1_ps (__F); +} + +/* Create a vector with element 0 as *P and the rest zero. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ss (float const *__P) +{ + return _mm_set_ss (*__P); +} + +/* Create a vector with all four elements equal to *P. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load1_ps (float const *__P) +{ + return _mm_set1_ps (*__P); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ps1 (float const *__P) +{ + return _mm_load1_ps (__P); +} + +/* Load four SPFP values from P. The address must be 16-byte aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_load_ps (float const *__P) +{ + return *(__m128 *)__P; +} + +/* Load four SPFP values from P. The address need not be 16-byte aligned. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadu_ps (float const *__P) +{ + return *(__m128_u *)__P; +} + +/* Load four SPFP values in reverse order. The address must be aligned. 
*/ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_loadr_ps (float const *__P) +{ + __v4sf __tmp = *(__v4sf *)__P; + return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3)); +} + +/* Create the vector [Z Y X W]. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W) +{ + return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z }; +} + +/* Create the vector [W X Y Z]. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_setr_ps (float __Z, float __Y, float __X, float __W) +{ + return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W }; +} + +/* Stores the lower SPFP value. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ss (float *__P, __m128 __A) +{ + *__P = ((__v4sf)__A)[0]; +} + +extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cvtss_f32 (__m128 __A) +{ + return ((__v4sf)__A)[0]; +} + +/* Store four SPFP values. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ps (float *__P, __m128 __A) +{ + *(__m128 *)__P = __A; +} + +/* Store four SPFP values. The address need not be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storeu_ps (float *__P, __m128 __A) +{ + *(__m128_u *)__P = __A; +} + +/* Store the lower SPFP value across four words. 
*/ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store1_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0)); + _mm_storeu_ps (__P, __tmp); +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_store_ps1 (float *__P, __m128 __A) +{ + _mm_store1_ps (__P, __A); +} + +/* Store four SPFP values in reverse order. The address must be aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_storer_ps (float *__P, __m128 __A) +{ + __v4sf __va = (__v4sf)__A; + __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3)); + _mm_store_ps (__P, __tmp); +} + +/* Sets the low SPFP value of A from the low value of B. */ +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_move_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_shuffle ((__v4sf)__A, (__v4sf)__B, + __extension__ + (__attribute__((__vector_size__ (16))) int) + {4,1,2,3}); +} + +/* Extracts one of the four words of A. The selector N must be immediate. */ +#ifdef __OPTIMIZE__ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_extract_pi16 (__m64 const __A, int const __N) +{ + return (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pextrw (__m64 const __A, int const __N) +{ + return _mm_extract_pi16 (__A, __N); +} +#else +#define _mm_extract_pi16(A, N) \ + ((int) (unsigned short) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N))) + +#define _m_pextrw(A, N) _mm_extract_pi16(A, N) +#endif + +/* Inserts word D into one of four words of A. The selector N must be + immediate. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_insert_pi16 (__m64 const __A, int const __D, int const __N) +{ + return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pinsrw (__m64 const __A, int const __D, int const __N) +{ + return _mm_insert_pi16 (__A, __D, __N); +} +#else +#define _mm_insert_pi16(A, D, N) \ + ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A), \ + (int)(D), (int)(N))) + +#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N) +#endif + +/* Compute the element-wise maximum of signed 16-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaxsw (__m64 __A, __m64 __B) +{ + return _mm_max_pi16 (__A, __B); +} + +/* Compute the element-wise maximum of unsigned 8-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_max_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmaxub (__m64 __A, __m64 __B) +{ + return _mm_max_pu8 (__A, __B); +} + +/* Compute the element-wise minimum of signed 16-bit values. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pi16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pminsw (__m64 __A, __m64 __B) +{ + return _mm_min_pi16 (__A, __B); +} + +/* Compute the element-wise minimum of unsigned 8-bit values. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_min_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pminub (__m64 __A, __m64 __B) +{ + return _mm_min_pu8 (__A, __B); +} + +/* Create an 8-bit mask of the signs of 8-bit values. */ +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_movemask_pi8 (__m64 __A) +{ + return __builtin_ia32_pmovmskb ((__v8qi)__A); +} + +extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmovmskb (__m64 __A) +{ + return _mm_movemask_pi8 (__A); +} + +/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values + in B and produce the high 16 bits of the 32-bit results. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_mulhi_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pmulhuw (__m64 __A, __m64 __B) +{ + return _mm_mulhi_pu16 (__A, __B); +} + +/* Return a combination of the four 16-bit values in A. The selector + must be an immediate. 
*/ +#ifdef __OPTIMIZE__ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shuffle_pi16 (__m64 __A, int const __N) +{ + return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pshufw (__m64 __A, int const __N) +{ + return _mm_shuffle_pi16 (__A, __N); +} +#else +#define _mm_shuffle_pi16(A, N) \ + ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N))) + +#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N) +#endif + +/* Conditionally store byte elements of A into P. The high bit of each + byte in the selector N determines whether the corresponding byte from + A is stored. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P) +{ +#ifdef __MMX_WITH_SSE__ + /* Emulate MMX maskmovq with SSE2 maskmovdqu and handle unmapped bits + 64:127 at address __P. */ + typedef long long __v2di __attribute__ ((__vector_size__ (16))); + typedef char __v16qi __attribute__ ((__vector_size__ (16))); + /* Zero-extend __A and __N to 128 bits. */ + __v2di __A128 = __extension__ (__v2di) { ((__v1di) __A)[0], 0 }; + __v2di __N128 = __extension__ (__v2di) { ((__v1di) __N)[0], 0 }; + + /* Check the alignment of __P. */ + __SIZE_TYPE__ offset = ((__SIZE_TYPE__) __P) & 0xf; + if (offset) + { + /* If the misalignment of __P > 8, subtract __P by 8 bytes. + Otherwise, subtract __P by the misalignment. */ + if (offset > 8) + offset = 8; + __P = (char *) (((__SIZE_TYPE__) __P) - offset); + + /* Shift __A128 and __N128 to the left by the adjustment. 
*/ + switch (offset) + { + case 1: + __A128 = __builtin_ia32_pslldqi128 (__A128, 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 8); + break; + case 2: + __A128 = __builtin_ia32_pslldqi128 (__A128, 2 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 2 * 8); + break; + case 3: + __A128 = __builtin_ia32_pslldqi128 (__A128, 3 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 3 * 8); + break; + case 4: + __A128 = __builtin_ia32_pslldqi128 (__A128, 4 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 4 * 8); + break; + case 5: + __A128 = __builtin_ia32_pslldqi128 (__A128, 5 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 5 * 8); + break; + case 6: + __A128 = __builtin_ia32_pslldqi128 (__A128, 6 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 6 * 8); + break; + case 7: + __A128 = __builtin_ia32_pslldqi128 (__A128, 7 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 7 * 8); + break; + case 8: + __A128 = __builtin_ia32_pslldqi128 (__A128, 8 * 8); + __N128 = __builtin_ia32_pslldqi128 (__N128, 8 * 8); + break; + default: + break; + } + } + __builtin_ia32_maskmovdqu ((__v16qi)__A128, (__v16qi)__N128, __P); +#else + __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P); +#endif +} + +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_maskmovq (__m64 __A, __m64 __N, char *__P) +{ + _mm_maskmove_si64 (__A, __N, __P); +} + +/* Compute the rounded averages of the unsigned 8-bit values in A and B. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgb (__m64 __A, __m64 __B) +{ + return _mm_avg_pu8 (__A, __B); +} + +/* Compute the rounded averages of the unsigned 16-bit values in A and B. 
*/ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_avg_pu16 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_pavgw (__m64 __A, __m64 __B) +{ + return _mm_avg_pu16 (__A, __B); +} + +/* Compute the sum of the absolute differences of the unsigned 8-bit + values in A and B. Return the value in the lower 16-bit word; the + upper words are cleared. */ +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sad_pu8 (__m64 __A, __m64 __B) +{ + return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B); +} + +extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_m_psadbw (__m64 __A, __m64 __B) +{ + return _mm_sad_pu8 (__A, __B); +} + +/* Stores the data in A to the address P without polluting the caches. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_pi (__m64 *__P, __m64 __A) +{ + __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A); +} + +/* Likewise. The address must be 16-byte aligned. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_stream_ps (float *__P, __m128 __A) +{ + __builtin_ia32_movntps (__P, (__v4sf)__A); +} + +/* Guarantees that every preceding store is globally visible before + any subsequent store. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sfence (void) +{ + __builtin_ia32_sfence (); +} + +/* Transpose the 4x4 matrix composed of row[0-3]. 
*/ +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ +do { \ + __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \ + __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1); \ + __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3); \ + __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1); \ + __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3); \ + (row0) = __builtin_ia32_movlhps (__t0, __t1); \ + (row1) = __builtin_ia32_movhlps (__t1, __t0); \ + (row2) = __builtin_ia32_movlhps (__t2, __t3); \ + (row3) = __builtin_ia32_movhlps (__t3, __t2); \ +} while (0) + +/* For backward source compatibility. */ +# include + +#ifdef __DISABLE_SSE__ +#undef __DISABLE_SSE__ +#pragma GCC pop_options +#endif /* __DISABLE_SSE__ */ + +/* The execution of the next instruction is delayed by an implementation + specific amount of time. The instruction does not modify the + architectural state. This is after the pop_options pragma because + it does not require SSE support in the processor--the encoding is a + nop on processors that do not support it. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_pause (void) +{ + __builtin_ia32_pause (); +} + +#endif /* _XMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xopintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xopintrin.h new file mode 100644 index 0000000..9a32bd5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xopintrin.h @@ -0,0 +1,850 @@ +/* Copyright (C) 2007-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86INTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _XOPMMINTRIN_H_INCLUDED +#define _XOPMMINTRIN_H_INCLUDED + +#include + +#ifndef __XOP__ +#pragma GCC push_options +#pragma GCC target("xop") +#define __DISABLE_XOP__ +#endif /* __XOP__ */ + +/* Integer multiply/add instructions. */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacssww ((__v8hi)__A,(__v8hi)__B, (__v8hi)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacsww ((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacsswd ((__v8hi)__A, (__v8hi)__B, (__v4si)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacswd ((__v8hi)__A, (__v8hi)__B, (__v4si)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacssdd ((__v4si)__A, (__v4si)__B, (__v4si)__C); +} + 
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacsdd ((__v4si)__A, (__v4si)__B, (__v4si)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacssdql ((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacsdql ((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacssdqh ((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmacsdqh ((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmadcsswd ((__v8hi)__A,(__v8hi)__B,(__v4si)__C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpmadcswd ((__v8hi)__A,(__v8hi)__B,(__v4si)__C); +} + +/* Packed Integer Horizontal Add and Subtract */ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddw_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddbw ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) +_mm_haddd_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddbd ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddbq ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddwd ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddwq ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epi32(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphadddq ((__v4si)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddw_epu8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddubw ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epu8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddubd ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epu8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddubq ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddd_epu16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphadduwd ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epu16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphadduwq ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_haddq_epu32(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphaddudq 
((__v4si)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubw_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphsubbw ((__v16qi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubd_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphsubwd ((__v8hi)__A); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_hsubq_epi32(__m128i __A) +{ + return (__m128i) __builtin_ia32_vphsubdq ((__v4si)__A); +} + +/* Vector conditional move and permute */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpcmov (__A, __B, __C); +} + +extern __inline __m256i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i) __builtin_ia32_vpcmov256 (__A, __B, __C); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i) __builtin_ia32_vpperm ((__v16qi)__A, (__v16qi)__B, (__v16qi)__C); +} + +/* Packed Integer Rotates and Shifts + Rotates - Non-Immediate form */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotd ((__v4si)__A, (__v4si)__B); 
+} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_rot_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vprotq ((__v2di)__A, (__v2di)__B); +} + +/* Rotates - Immediate form */ + +#ifdef __OPTIMIZE__ +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi8(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotbi ((__v16qi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi16(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotwi ((__v8hi)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi32(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotdi ((__v4si)__A, __B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_roti_epi64(__m128i __A, const int __B) +{ + return (__m128i) __builtin_ia32_vprotqi ((__v2di)__A, __B); +} +#else +#define _mm_roti_epi8(A, N) \ + ((__m128i) __builtin_ia32_vprotbi ((__v16qi)(__m128i)(A), (int)(N))) +#define _mm_roti_epi16(A, N) \ + ((__m128i) __builtin_ia32_vprotwi ((__v8hi)(__m128i)(A), (int)(N))) +#define _mm_roti_epi32(A, N) \ + ((__m128i) __builtin_ia32_vprotdi ((__v4si)(__m128i)(A), (int)(N))) +#define _mm_roti_epi64(A, N) \ + ((__m128i) __builtin_ia32_vprotqi ((__v2di)(__m128i)(A), (int)(N))) +#endif + +/* Shifts */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshlb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshlw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshld ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_shl_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshlq ((__v2di)__A, (__v2di)__B); +} + + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshab ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshaw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshad ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_sha_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpshaq ((__v2di)__A, (__v2di)__B); +} + +/* Compare and Predicate Generation + pcom (integer, unsigned bytes) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltub ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleub ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtub ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeub ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomequb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomnequb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseub ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epu8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueub ((__v16qi)__A, (__v16qi)__B); +} + +/*pcom (integer, unsigned words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltuw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleuw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtuw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeuw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomequw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomnequw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseuw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epu16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueuw ((__v8hi)__A, (__v8hi)__B); +} + +/*pcom (integer, unsigned double words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltud ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleud ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtud ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeud ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomequd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomnequd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseud ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epu32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueud ((__v4si)__A, (__v4si)__B); +} + +/*pcom (integer, unsigned quad words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltuq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleuq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtuq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeuq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomequq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomnequq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseuq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epu64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueuq ((__v2di)__A, (__v2di)__B); +} + +/*pcom (integer, signed bytes) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseb ((__v16qi)__A, (__v16qi)__B); +} + +extern __inline __m128i 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueb ((__v16qi)__A, (__v16qi)__B); +} + +/*pcom (integer, signed words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomlew ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgew ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqw ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalsew ((__v8hi)__A, (__v8hi)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi16(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtruew ((__v8hi)__A, (__v8hi)__B); +} + +/*pcom (integer, signed double words) */ + 
+extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomled ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomged ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqd ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalsed ((__v4si)__A, (__v4si)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi32(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrued ((__v4si)__A, (__v4si)__B); +} + +/*pcom (integer, signed quad words) */ + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comlt_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomltq ((__v2di)__A, (__v2di)__B); +} + +extern __inline 
__m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comle_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomleq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comgt_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgtq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comge_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomgeq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comeq_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomeqq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comneq_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomneqq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comfalse_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomfalseq ((__v2di)__A, (__v2di)__B); +} + +extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_comtrue_epi64(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vpcomtrueq ((__v2di)__A, (__v2di)__B); +} + +/* FRCZ */ + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_vfrczps ((__v4sf)__A); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_vfrczpd ((__v2df)__A); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_ss (__m128 __A, __m128 
__B) +{ + return (__m128) __builtin_ia32_movss ((__v4sf)__A, + (__v4sf) + __builtin_ia32_vfrczss ((__v4sf)__B)); +} + +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_frcz_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_movsd ((__v2df)__A, + (__v2df) + __builtin_ia32_vfrczsd ((__v2df)__B)); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_frcz_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_vfrczps256 ((__v8sf)__A); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_frcz_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_vfrczpd256 ((__v4df)__A); +} + +/* PERMIL2 */ + +#ifdef __OPTIMIZE__ +extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute2_pd (__m128d __X, __m128d __Y, __m128i __C, const int __I) +{ + return (__m128d) __builtin_ia32_vpermil2pd ((__v2df)__X, + (__v2df)__Y, + (__v2di)__C, + __I); +} + +extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2_pd (__m256d __X, __m256d __Y, __m256i __C, const int __I) +{ + return (__m256d) __builtin_ia32_vpermil2pd256 ((__v4df)__X, + (__v4df)__Y, + (__v4di)__C, + __I); +} + +extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_permute2_ps (__m128 __X, __m128 __Y, __m128i __C, const int __I) +{ + return (__m128) __builtin_ia32_vpermil2ps ((__v4sf)__X, + (__v4sf)__Y, + (__v4si)__C, + __I); +} + +extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm256_permute2_ps (__m256 __X, __m256 __Y, __m256i __C, const int __I) +{ + return (__m256) __builtin_ia32_vpermil2ps256 ((__v8sf)__X, + (__v8sf)__Y, + (__v8si)__C, + __I); +} +#else +#define _mm_permute2_pd(X, Y, C, I) \ + ((__m128d) __builtin_ia32_vpermil2pd ((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + 
(__v2di)(__m128i)(C), \ + (int)(I))) + +#define _mm256_permute2_pd(X, Y, C, I) \ + ((__m256d) __builtin_ia32_vpermil2pd256 ((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256i)(C), \ + (int)(I))) + +#define _mm_permute2_ps(X, Y, C, I) \ + ((__m128) __builtin_ia32_vpermil2ps ((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), \ + (__v4si)(__m128i)(C), \ + (int)(I))) + +#define _mm256_permute2_ps(X, Y, C, I) \ + ((__m256) __builtin_ia32_vpermil2ps256 ((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256i)(C), \ + (int)(I))) +#endif /* __OPTIMIZE__ */ + +#ifdef __DISABLE_XOP__ +#undef __DISABLE_XOP__ +#pragma GCC pop_options +#endif /* __DISABLE_XOP__ */ + +#endif /* _XOPMMINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xsavecintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsavecintrin.h new file mode 100644 index 0000000..e21fdf5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsavecintrin.h @@ -0,0 +1,58 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _XSAVECINTRIN_H_INCLUDED +#define _XSAVECINTRIN_H_INCLUDED + +#ifndef __XSAVEC__ +#pragma GCC push_options +#pragma GCC target("xsavec") +#define __DISABLE_XSAVEC__ +#endif /* __XSAVEC__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsavec (void *__P, long long __M) +{ + __builtin_ia32_xsavec (__P, __M); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsavec64 (void *__P, long long __M) +{ + __builtin_ia32_xsavec64 (__P, __M); +} +#endif + +#ifdef __DISABLE_XSAVEC__ +#undef __DISABLE_XSAVEC__ +#pragma GCC pop_options +#endif /* __DISABLE_XSAVEC__ */ + +#endif /* _XSAVECINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xsaveintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsaveintrin.h new file mode 100644 index 0000000..8bfd3f0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsaveintrin.h @@ -0,0 +1,86 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+ + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _XSAVEINTRIN_H_INCLUDED +#define _XSAVEINTRIN_H_INCLUDED + +#ifndef __XSAVE__ +#pragma GCC push_options +#pragma GCC target("xsave") +#define __DISABLE_XSAVE__ +#endif /* __XSAVE__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsave (void *__P, long long __M) +{ + __builtin_ia32_xsave (__P, __M); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xrstor (void *__P, long long __M) +{ + __builtin_ia32_xrstor (__P, __M); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsetbv (unsigned int __A, long long __V) +{ + __builtin_ia32_xsetbv (__A, __V); +} + +extern __inline long long +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xgetbv (unsigned int __A) +{ + return __builtin_ia32_xgetbv (__A); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsave64 (void *__P, long long __M) +{ + __builtin_ia32_xsave64 (__P, __M); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xrstor64 (void *__P, long long __M) +{ + __builtin_ia32_xrstor64 (__P, __M); +} +#endif + +#ifdef __DISABLE_XSAVE__ +#undef __DISABLE_XSAVE__ +#pragma GCC pop_options +#endif /* __DISABLE_XSAVE__ */ + +#endif /* _XSAVEINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xsaveoptintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsaveoptintrin.h new file mode 100644 index 0000000..1a7c549 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsaveoptintrin.h @@ -0,0 +1,58 @@ +/* 
Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." 
+#endif + +#ifndef _XSAVEOPTINTRIN_H_INCLUDED +#define _XSAVEOPTINTRIN_H_INCLUDED + +#ifndef __XSAVEOPT__ +#pragma GCC push_options +#pragma GCC target("xsaveopt") +#define __DISABLE_XSAVEOPT__ +#endif /* __XSAVEOPT__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsaveopt (void *__P, long long __M) +{ + __builtin_ia32_xsaveopt (__P, __M); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsaveopt64 (void *__P, long long __M) +{ + __builtin_ia32_xsaveopt64 (__P, __M); +} +#endif + +#ifdef __DISABLE_XSAVEOPT__ +#undef __DISABLE_XSAVEOPT__ +#pragma GCC pop_options +#endif /* __DISABLE_XSAVEOPT__ */ + +#endif /* _XSAVEOPTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xsavesintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsavesintrin.h new file mode 100644 index 0000000..36964e9 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xsavesintrin.h @@ -0,0 +1,72 @@ +/* Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
*/ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _XSAVESINTRIN_H_INCLUDED +#define _XSAVESINTRIN_H_INCLUDED + +#ifndef __XSAVES__ +#pragma GCC push_options +#pragma GCC target("xsaves") +#define __DISABLE_XSAVES__ +#endif /* __XSAVES__ */ + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsaves (void *__P, long long __M) +{ + __builtin_ia32_xsaves (__P, __M); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xrstors (void *__P, long long __M) +{ + __builtin_ia32_xrstors (__P, __M); +} + +#ifdef __x86_64__ +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xrstors64 (void *__P, long long __M) +{ + __builtin_ia32_xrstors64 (__P, __M); +} + +extern __inline void +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xsaves64 (void *__P, long long __M) +{ + __builtin_ia32_xsaves64 (__P, __M); +} +#endif + +#ifdef __DISABLE_XSAVES__ +#undef __DISABLE_XSAVES__ +#pragma GCC pop_options +#endif /* __DISABLE_XSAVES__ */ + +#endif /* _XSAVESINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/include/xtestintrin.h b/lib/gcc/x86_64-linux-musl/12.2.0/include/xtestintrin.h new file mode 100644 index 0000000..b8cd6b0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/include/xtestintrin.h @@ -0,0 +1,51 @@ +/* Copyright (C) 2012-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _X86GPRINTRIN_H_INCLUDED +# error "Never use directly; include instead." +#endif + +#ifndef _XTESTINTRIN_H_INCLUDED +#define _XTESTINTRIN_H_INCLUDED + +#ifndef __RTM__ +#pragma GCC push_options +#pragma GCC target("rtm") +#define __DISABLE_RTM__ +#endif /* __RTM__ */ + +/* Return non-zero if the instruction executes inside an RTM or HLE code + region. Return zero otherwise. */ +extern __inline int +__attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_xtest (void) +{ + return __builtin_ia32_xtest (); +} + +#ifdef __DISABLE_RTM__ +#undef __DISABLE_RTM__ +#pragma GCC pop_options +#endif /* __DISABLE_RTM__ */ + +#endif /* _XTESTINTRIN_H_INCLUDED */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/fixinc_list b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/fixinc_list new file mode 100644 index 0000000..092bc2b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/fixinc_list @@ -0,0 +1 @@ +; diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/gsyslimits.h b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/gsyslimits.h new file mode 100644 index 0000000..a362802 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/gsyslimits.h @@ -0,0 +1,8 @@ +/* syslimits.h stands for the system's own limits.h file. + If we can use it ok unmodified, then we install this text. + If fixincludes fixes it, then the fixed version is installed + instead of this text. 
*/ + +#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */ +#include_next +#undef _GCC_NEXT_LIMITS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/include/README b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/include/README new file mode 100644 index 0000000..7086a77 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/include/README @@ -0,0 +1,14 @@ +This README file is copied into the directory for GCC-only header files +when fixincludes is run by the makefile for GCC. + +Many of the files in this directory were automatically edited from the +standard system header files by the fixincludes process. They are +system-specific, and will not work on any other kind of system. They +are also not part of GCC. The reason we have to do this is because +GCC requires ANSI C headers and many vendors supply ANSI-incompatible +headers. + +Because this is an automated process, sometimes headers get "fixed" +that do not, strictly speaking, need a fix. As long as nothing is broken +by the process, it is just an unfortunate collateral inconvenience. +We would like to rectify it, if it is not "too inconvenient". diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/include/limits.h b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/include/limits.h new file mode 100644 index 0000000..9390f01 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/include/limits.h @@ -0,0 +1,206 @@ +/* Copyright (C) 1992-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
+ +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* This administrivia gets added to the beginning of limits.h + if the system has its own version of limits.h. */ + +/* We use _GCC_LIMITS_H_ because we want this not to match + any macros that the system's limits.h uses for its own purposes. */ +#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */ +#define _GCC_LIMITS_H_ + +#ifndef _LIBC_LIMITS_H_ +/* Use "..." so that we find syslimits.h only in this same directory. */ +#include "syslimits.h" +#endif +/* Copyright (C) 1991-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef _LIMITS_H___ +#define _LIMITS_H___ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT __CHAR_BIT__ + +/* Maximum length of a multibyte character. 
*/ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-SCHAR_MAX - 1) +#undef SCHAR_MAX +#define SCHAR_MAX __SCHAR_MAX__ + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#if __SCHAR_MAX__ == __INT_MAX__ +# define UCHAR_MAX (SCHAR_MAX * 2U + 1U) +#else +# define UCHAR_MAX (SCHAR_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +# undef CHAR_MIN +# if __SCHAR_MAX__ == __INT_MAX__ +# define CHAR_MIN 0U +# else +# define CHAR_MIN 0 +# endif +# undef CHAR_MAX +# define CHAR_MAX UCHAR_MAX +#else +# undef CHAR_MIN +# define CHAR_MIN SCHAR_MIN +# undef CHAR_MAX +# define CHAR_MAX SCHAR_MAX +#endif + +/* Minimum and maximum values a `signed short int' can hold. */ +#undef SHRT_MIN +#define SHRT_MIN (-SHRT_MAX - 1) +#undef SHRT_MAX +#define SHRT_MAX __SHRT_MAX__ + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#if __SHRT_MAX__ == __INT_MAX__ +# define USHRT_MAX (SHRT_MAX * 2U + 1U) +#else +# define USHRT_MAX (SHRT_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `signed int' can hold. */ +#undef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1U) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX - 1L) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1UL) + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* Minimum and maximum values a `signed long long int' can hold. 
*/ +# undef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# undef LLONG_MAX +# define LLONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULLONG_MAX +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LONG_LONG_MIN +# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL) +# undef LONG_LONG_MAX +# define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULONG_LONG_MAX +# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL) +#endif + +#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \ + || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L)) +/* TS 18661-1 / C2X widths of integer types. */ +# undef CHAR_WIDTH +# define CHAR_WIDTH __SCHAR_WIDTH__ +# undef SCHAR_WIDTH +# define SCHAR_WIDTH __SCHAR_WIDTH__ +# undef UCHAR_WIDTH +# define UCHAR_WIDTH __SCHAR_WIDTH__ +# undef SHRT_WIDTH +# define SHRT_WIDTH __SHRT_WIDTH__ +# undef USHRT_WIDTH +# define USHRT_WIDTH __SHRT_WIDTH__ +# undef INT_WIDTH +# define INT_WIDTH __INT_WIDTH__ +# undef UINT_WIDTH +# define UINT_WIDTH __INT_WIDTH__ +# undef LONG_WIDTH +# define LONG_WIDTH __LONG_WIDTH__ +# undef ULONG_WIDTH +# define ULONG_WIDTH __LONG_WIDTH__ +# undef LLONG_WIDTH +# define LLONG_WIDTH __LONG_LONG_WIDTH__ +# undef ULLONG_WIDTH +# define ULLONG_WIDTH __LONG_LONG_WIDTH__ +#endif + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L +/* C2X width and limit of _Bool. */ +# undef BOOL_MAX +# define BOOL_MAX 1 +# undef BOOL_WIDTH +# define BOOL_WIDTH 1 +#endif + +#endif /* _LIMITS_H___ */ +/* This administrivia gets added to the end of limits.h + if the system has its own version of limits.h. 
*/ + +#else /* not _GCC_LIMITS_H_ */ + +#ifdef _GCC_NEXT_LIMITS_H +#include_next /* recurse down to the real one */ +#endif + +#endif /* not _GCC_LIMITS_H_ */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/macro_list b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/macro_list new file mode 100644 index 0000000..d28d5cc --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/macro_list @@ -0,0 +1,2 @@ +linux +unix diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/mkheaders.conf b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/mkheaders.conf new file mode 100644 index 0000000..f9c742f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/install-tools/mkheaders.conf @@ -0,0 +1,3 @@ +SYSTEM_HEADER_DIR="/mnt/everest/sources/mussel/sysroot${sysroot_headers_suffix}/usr/include" +OTHER_FIXINCLUDES_DIRS="" +STMP_FIXINC="stmp-fixinc" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/libgcc.a b/lib/gcc/x86_64-linux-musl/12.2.0/libgcc.a new file mode 100644 index 0000000..8f5e328 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/libgcc.a differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/libgcc_eh.a b/lib/gcc/x86_64-linux-musl/12.2.0/libgcc_eh.a new file mode 100644 index 0000000..b4616f0 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/libgcc_eh.a differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/libgcov.a b/lib/gcc/x86_64-linux-musl/12.2.0/libgcov.a new file mode 100644 index 0000000..c235f71 Binary files /dev/null and b/lib/gcc/x86_64-linux-musl/12.2.0/libgcov.a differ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/gtype.state b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/gtype.state new file mode 100644 index 0000000..1211dbe --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/gtype.state @@ -0,0 +1,38020 @@ +;;;;@@@@ GCC gengtype state +;;; DON'T EDIT THIS FILE, since generated by GCC's gengtype +;;; The format of this file is tied to a particular version of GCC. 
+;;; Don't parse this file wihout knowing GCC gengtype internals. +;;; This file should be parsed by the same gengtype which wrote it. + +(!version "12.2.0") +(!srcdir "/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc") +(!languages 10 ada c cp d fortran go jit lto objc objcp) +(!fileslist 234 0 + (!srcfile 1023 "../libcpp/include/line-map.h") + (!srcfile 1023 "../libcpp/include/cpplib.h") + (!srcfile 1023 "input.h") + (!srcfile 1023 "coretypes.h") + (!file 1023 "auto-host.h") + (!srcfile 1023 "../include/ansidecl.h") + (!file 1023 "options.h") + (!srcfile 1023 "config/vxworks-dummy.h") + (!srcfile 1023 "config/i386/biarch64.h") + (!srcfile 1023 "config/i386/i386.h") + (!srcfile 1023 "config/i386/unix.h") + (!srcfile 1023 "config/i386/att.h") + (!srcfile 1023 "config/dbxelf.h") + (!srcfile 1023 "config/elfos.h") + (!srcfile 1023 "config/gnu-user.h") + (!srcfile 1023 "config/glibc-stdint.h") + (!srcfile 1023 "config/i386/x86-64.h") + (!srcfile 1023 "config/i386/gnu-user-common.h") + (!srcfile 1023 "config/i386/gnu-user64.h") + (!srcfile 1023 "config/linux.h") + (!srcfile 1023 "config/linux-android.h") + (!srcfile 1023 "config/i386/linux-common.h") + (!srcfile 1023 "config/i386/linux64.h") + (!srcfile 1023 "config/initfini-array.h") + (!srcfile 1023 "defaults.h") + (!srcfile 1023 "../include/hashtab.h") + (!srcfile 1023 "../include/splay-tree.h") + (!srcfile 1023 "bitmap.h") + (!srcfile 1023 "wide-int.h") + (!srcfile 1023 "alias.h") + (!srcfile 1023 "coverage.cc") + (!srcfile 1023 "rtl.h") + (!srcfile 1023 "optabs.h") + (!srcfile 1023 "tree.h") + (!srcfile 1023 "tree-core.h") + (!srcfile 1023 "libfuncs.h") + (!srcfile 1023 "../libcpp/include/symtab.h") + (!srcfile 1023 "../include/obstack.h") + (!srcfile 1023 "real.h") + (!srcfile 1023 "function.h") + (!srcfile 1023 "insn-addr.h") + (!srcfile 1023 "hwint.h") + (!srcfile 1023 "fixed-value.h") + (!srcfile 1023 "function-abi.h") + (!srcfile 1023 "output.h") + (!srcfile 1023 "cfgloop.h") + (!srcfile 1023 "cfg.h") + 
(!srcfile 1023 "profile-count.h") + (!srcfile 1023 "cselib.h") + (!srcfile 1023 "basic-block.h") + (!srcfile 1023 "ipa-ref.h") + (!srcfile 1023 "cgraph.h") + (!srcfile 1023 "symtab-thunks.h") + (!srcfile 1023 "symtab-thunks.cc") + (!srcfile 1023 "symtab-clones.h") + (!srcfile 1023 "reload.h") + (!srcfile 1023 "caller-save.cc") + (!srcfile 1023 "symtab.cc") + (!srcfile 1023 "alias.cc") + (!srcfile 1023 "bitmap.cc") + (!srcfile 1023 "cselib.cc") + (!srcfile 1023 "cgraph.cc") + (!srcfile 1023 "ipa-prop.cc") + (!srcfile 1023 "ipa-cp.cc") + (!srcfile 1023 "ipa-utils.h") + (!srcfile 1023 "ipa-param-manipulation.h") + (!srcfile 1023 "ipa-sra.cc") + (!srcfile 1023 "dbxout.cc") + (!srcfile 1023 "ipa-modref.h") + (!srcfile 1023 "ipa-modref.cc") + (!srcfile 1023 "ipa-modref-tree.h") + (!srcfile 1023 "signop.h") + (!srcfile 1023 "diagnostic-spec.h") + (!srcfile 1023 "diagnostic-spec.cc") + (!srcfile 1023 "dwarf2out.h") + (!srcfile 1023 "dwarf2asm.cc") + (!srcfile 1023 "dwarf2cfi.cc") + (!srcfile 1023 "dwarf2ctf.cc") + (!srcfile 1023 "dwarf2out.cc") + (!srcfile 1023 "ctfc.h") + (!srcfile 1023 "ctfout.cc") + (!srcfile 1023 "btfout.cc") + (!srcfile 1023 "tree-vect-generic.cc") + (!srcfile 1023 "gimple-isel.cc") + (!srcfile 1023 "dojump.cc") + (!srcfile 1023 "emit-rtl.h") + (!srcfile 1023 "emit-rtl.cc") + (!srcfile 1023 "except.h") + (!srcfile 1023 "explow.cc") + (!srcfile 1023 "expr.cc") + (!srcfile 1023 "expr.h") + (!srcfile 1023 "function.cc") + (!srcfile 1023 "except.cc") + (!srcfile 1023 "ggc-tests.cc") + (!srcfile 1023 "gcse.cc") + (!srcfile 1023 "godump.cc") + (!srcfile 1023 "lists.cc") + (!srcfile 1023 "optabs-libfuncs.cc") + (!srcfile 1023 "profile.cc") + (!srcfile 1023 "mcf.cc") + (!srcfile 1023 "reg-stack.cc") + (!srcfile 1023 "cfgrtl.cc") + (!srcfile 1023 "stor-layout.cc") + (!srcfile 1023 "stringpool.cc") + (!srcfile 1023 "tree.cc") + (!srcfile 1023 "varasm.cc") + (!srcfile 1023 "gimple.h") + (!srcfile 1023 "gimple-ssa.h") + (!srcfile 1023 "tree-ssanames.cc") + 
(!srcfile 1023 "tree-eh.cc") + (!srcfile 1023 "tree-ssa-address.cc") + (!srcfile 1023 "tree-cfg.cc") + (!srcfile 1023 "tree-ssa-loop-ivopts.cc") + (!srcfile 1023 "tree-dfa.cc") + (!srcfile 1023 "tree-iterator.cc") + (!srcfile 1023 "gimple-expr.cc") + (!srcfile 1023 "tree-chrec.h") + (!srcfile 1023 "tree-scalar-evolution.cc") + (!srcfile 1023 "tree-ssa-operands.h") + (!srcfile 1023 "tree-profile.cc") + (!srcfile 1023 "tree-nested.cc") + (!srcfile 1023 "omp-offload.h") + (!srcfile 1023 "omp-general.cc") + (!srcfile 1023 "omp-low.cc") + (!srcfile 1023 "targhooks.cc") + (!srcfile 1023 "config/i386/i386.cc") + (!srcfile 1023 "passes.cc") + (!srcfile 1023 "cgraphclones.cc") + (!srcfile 1023 "tree-phinodes.cc") + (!srcfile 1023 "tree-ssa-alias.h") + (!srcfile 1023 "tree-ssanames.h") + (!srcfile 1023 "tree-vrp.h") + (!srcfile 1023 "value-range.h") + (!srcfile 1023 "ipa-prop.h") + (!srcfile 1023 "trans-mem.cc") + (!srcfile 1023 "lto-streamer.h") + (!srcfile 1023 "target-globals.h") + (!srcfile 1023 "ipa-predicate.h") + (!srcfile 1023 "ipa-fnsummary.h") + (!srcfile 1023 "vtable-verify.cc") + (!srcfile 1023 "asan.cc") + (!srcfile 1023 "ubsan.cc") + (!srcfile 1023 "tsan.cc") + (!srcfile 1023 "sanopt.cc") + (!srcfile 1023 "sancov.cc") + (!srcfile 1023 "ipa-devirt.cc") + (!srcfile 1023 "internal-fn.h") + (!srcfile 1023 "calls.cc") + (!srcfile 1023 "omp-general.h") + (!srcfile 1023 "config/i386/i386-builtins.cc") + (!srcfile 1023 "config/i386/i386-expand.cc") + (!srcfile 1023 "config/i386/i386-options.cc") + (!srcfile 1 "ada/gcc-interface/ada-tree.h") + (!srcfile 1 "ada/gcc-interface/gigi.h") + (!srcfile 1 "ada/gcc-interface/decl.cc") + (!srcfile 1 "ada/gcc-interface/trans.cc") + (!srcfile 1 "ada/gcc-interface/utils.cc") + (!srcfile 1 "ada/gcc-interface/misc.cc") + (!srcfile 2 "c/c-lang.cc") + (!srcfile 258 "c/c-tree.h") + (!srcfile 258 "c/c-decl.cc") + (!srcfile 774 "c-family/c-common.cc") + (!srcfile 774 "c-family/c-common.h") + (!srcfile 774 "c-family/c-objc.h") + (!srcfile 
774 "c-family/c-cppbuiltin.cc") + (!srcfile 774 "c-family/c-pragma.h") + (!srcfile 774 "c-family/c-pragma.cc") + (!srcfile 774 "c-family/c-format.cc") + (!srcfile 258 "c/c-objc-common.cc") + (!srcfile 258 "c/c-parser.h") + (!srcfile 258 "c/c-parser.cc") + (!srcfile 258 "c/c-lang.h") + (!srcfile 516 "cp/name-lookup.h") + (!srcfile 516 "cp/cp-tree.h") + (!srcfile 516 "cp/decl.h") + (!srcfile 516 "cp/parser.h") + (!srcfile 516 "cp/call.cc") + (!srcfile 516 "cp/class.cc") + (!srcfile 516 "cp/constexpr.cc") + (!srcfile 516 "cp/constraint.cc") + (!srcfile 516 "cp/coroutines.cc") + (!srcfile 516 "cp/cp-gimplify.cc") + (!srcfile 4 "cp/cp-lang.cc") + (!srcfile 516 "cp/cp-objcp-common.cc") + (!srcfile 516 "cp/decl.cc") + (!srcfile 516 "cp/decl2.cc") + (!srcfile 516 "cp/except.cc") + (!srcfile 516 "cp/friend.cc") + (!srcfile 516 "cp/init.cc") + (!srcfile 516 "cp/lambda.cc") + (!srcfile 516 "cp/lex.cc") + (!srcfile 516 "cp/logic.cc") + (!srcfile 516 "cp/mangle.cc") + (!srcfile 516 "cp/method.cc") + (!srcfile 516 "cp/module.cc") + (!srcfile 516 "cp/name-lookup.cc") + (!srcfile 516 "cp/parser.cc") + (!srcfile 516 "cp/pt.cc") + (!srcfile 516 "cp/rtti.cc") + (!srcfile 516 "cp/semantics.cc") + (!srcfile 516 "cp/tree.cc") + (!srcfile 516 "cp/typeck2.cc") + (!srcfile 516 "cp/vtable-class-hierarchy.cc") + (!srcfile 8 "d/d-tree.h") + (!srcfile 8 "d/d-builtins.cc") + (!srcfile 8 "d/d-lang.cc") + (!srcfile 8 "d/typeinfo.cc") + (!srcfile 16 "fortran/f95-lang.cc") + (!srcfile 16 "fortran/trans-decl.cc") + (!srcfile 16 "fortran/trans-intrinsic.cc") + (!srcfile 16 "fortran/trans-io.cc") + (!srcfile 16 "fortran/trans-stmt.cc") + (!srcfile 16 "fortran/trans-types.cc") + (!srcfile 16 "fortran/trans-types.h") + (!srcfile 16 "fortran/trans.h") + (!srcfile 16 "fortran/trans-const.h") + (!srcfile 32 "go/go-lang.cc") + (!srcfile 32 "go/go-c.h") + (!srcfile 64 "jit/dummy-frontend.cc") + (!srcfile 128 "lto/lto-tree.h") + (!srcfile 128 "lto/lto-lang.cc") + (!srcfile 128 "lto/lto.cc") + (!srcfile 128 
"lto/lto.h") + (!srcfile 128 "lto/lto-common.h") + (!srcfile 128 "lto/lto-common.cc") + (!srcfile 128 "lto/lto-dump.cc") + (!srcfile 768 "objc/objc-map.h") + (!srcfile 768 "objc/objc-act.h") + (!srcfile 768 "objc/objc-act.cc") + (!srcfile 768 "objc/objc-runtime-shared-support.cc") + (!srcfile 768 "objc/objc-gnu-runtime-abi-01.cc") + (!srcfile 768 "objc/objc-next-runtime-abi-01.cc") + (!srcfile 768 "objc/objc-next-runtime-abi-02.cc") + (!srcfile 512 "objcp/objcp-lang.cc") +) +(!structures 1773 + + (!type struct 1 nil gc_used "source_range" + (!srcfileloc "../libcpp/include/line-map.h" 756) + (!fields 2 + (!pair "m_start" + (!type scalar_nonchar 2 + (!type pointer 3 + (!type pointer 4 nil gc_unused + (!type already_seen 3) + ) + gc_used + (!type already_seen 2) + ) + gc_pointed_to) + (!srcfileloc "../libcpp/include/line-map.h" 322) + nil ) + (!pair "m_finish" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 323) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 5 nil gc_used "line_map" + (!srcfileloc "../libcpp/include/line-map.h" 389) + (!fields 1 + (!pair "start_location" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 386) + nil ) + ) + (!options + (!option desc string "MAP_ORDINARY_P (&%h) ? 
1 : 2") + (!option tag string "0") + ) + 1023 nil nil ) + + (!type struct 6 + (!type pointer 7 nil gc_unused + (!type already_seen 6) + ) + gc_used "line_map_ordinary" + (!srcfileloc "../libcpp/include/line-map.h" 725) + (!fields 7 + (!pair "reason" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 409) + nil ) + (!pair "sysp" + (!type scalar_char 8 + (!type pointer 9 + (!type pointer 10 nil gc_unused + (!type already_seen 9) + ) + gc_unused + (!type already_seen 8) + ) + gc_used) + (!srcfileloc "../libcpp/include/line-map.h" 415) + nil ) + (!pair "m_column_and_range_bits" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 419) + nil ) + (!pair "m_range_bits" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 434) + nil ) + (!pair "to_file" + (!type string 11 nil gc_used) + (!srcfileloc "../libcpp/include/line-map.h" 438) + nil ) + (!pair "to_line" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 439) + nil ) + (!pair "included_from" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 445) + nil ) + ) + (!options + (!option tag string "1") + ) + 1023 nil + (!type already_seen 5) + ) + + (!type struct 12 + (!type pointer 13 + (!type pointer 14 nil gc_unused + (!type already_seen 13) + ) + gc_unused + (!type already_seen 12) + ) + gc_used "cpp_hashnode" + (!srcfileloc "../libcpp/include/cpplib.h" 969) + (!fields 8 + (!pair "ident" + (!type struct 15 + (!type pointer 16 + (!type pointer 17 nil gc_unused + (!type already_seen 16) + ) + gc_unused + (!type already_seen 15) + ) + gc_used "ht_identifier" + (!srcfileloc "../libcpp/include/symtab.h" 35) + (!fields 3 + (!pair "str" + (!type already_seen 11) + (!srcfileloc "../libcpp/include/symtab.h" 32) + nil ) + (!pair "len" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/symtab.h" 33) + nil ) + (!pair "hash_value" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/symtab.h" 34) + nil ) + ) + nil 1023 nil 
nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 951) + nil ) + (!pair "is_directive" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 952) + nil ) + (!pair "directive_index" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 953) + nil ) + (!pair "rid_code" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 956) + nil ) + (!pair "flags" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 957) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 958) + nil ) + (!pair "deferred" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 966) + nil ) + (!pair "value" + (!type union 18 nil gc_used "_cpp_hashnode_value" + (!srcfileloc "../libcpp/include/cpplib.h" 948) + (!fields 4 + (!pair "answers" + (!type pointer 19 nil gc_used + (!type struct 20 + (!type already_seen 19) + gc_pointed_to "cpp_macro" + (!srcfileloc "../libcpp/include/cpplib.h" 878) + (!fields 13 + (!pair "parm" + (!type union 21 nil gc_used "cpp_parm_u" + (!srcfileloc "../libcpp/include/cpplib.h" 828) + (!fields 2 + (!pair "params" + (!type already_seen 14) + (!srcfileloc "../libcpp/include/cpplib.h" 824) + (!options + (!option length string "%1.paramc") + (!option nested_ptr nested + (!type union 22 + (!type pointer 23 + (!type pointer 24 nil gc_used + (!type already_seen 23) + ) + gc_pointed_to + (!type already_seen 22) + ) + gc_pointed_to "tree_node" + (!srcfileloc "tree-core.h" 2086) + (!fields 39 + (!pair "base" + (!type struct 25 nil gc_used "tree_base" + (!srcfileloc "tree-core.h" 1135) + (!fields 18 + (!pair "code" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1025) + nil ) + (!pair "side_effects_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1027) + nil ) + (!pair "constant_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1028) + nil ) + (!pair "addressable_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1029) + nil ) + (!pair 
"volatile_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1030) + nil ) + (!pair "readonly_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1031) + nil ) + (!pair "asm_written_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1032) + nil ) + (!pair "nowarning_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1033) + nil ) + (!pair "visited" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1034) + nil ) + (!pair "used_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1036) + nil ) + (!pair "nothrow_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1037) + nil ) + (!pair "static_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1038) + nil ) + (!pair "public_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1039) + nil ) + (!pair "private_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1040) + nil ) + (!pair "protected_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1041) + nil ) + (!pair "deprecated_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1042) + nil ) + (!pair "default_def_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1043) + nil ) + (!pair "u" + (!type union 26 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-core.h:1045" + (!srcfileloc "tree-core.h" 1134) + (!fields 9 + (!pair "bits" + (!type struct 27 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-core.h:1048" + (!srcfileloc "tree-core.h" 1073) + (!fields 17 + (!pair "lang_flag_0" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1049) + nil ) + (!pair "lang_flag_1" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1050) + nil ) + (!pair "lang_flag_2" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1051) + nil ) + (!pair "lang_flag_3" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1052) + nil ) + (!pair "lang_flag_4" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1053) 
+ nil ) + (!pair "lang_flag_5" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1054) + nil ) + (!pair "lang_flag_6" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1055) + nil ) + (!pair "saturating_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1056) + nil ) + (!pair "unsigned_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1058) + nil ) + (!pair "packed_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1059) + nil ) + (!pair "user_align" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1060) + nil ) + (!pair "nameless_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1061) + nil ) + (!pair "atomic_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1062) + nil ) + (!pair "unavailable_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1063) + nil ) + (!pair "spare0" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1064) + nil ) + (!pair "spare1" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1066) + nil ) + (!pair "address_space" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1072) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1073) + nil ) + (!pair "int_length" + (!type struct 28 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-core.h:1080" + (!srcfileloc "tree-core.h" 1094) + (!fields 3 + (!pair "unextended" + (!type already_seen 8) + (!srcfileloc "tree-core.h" 1083) + nil ) + (!pair "extended" + (!type already_seen 8) + (!srcfileloc "tree-core.h" 1087) + nil ) + (!pair "offset" + (!type already_seen 8) + (!srcfileloc "tree-core.h" 1093) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1094) + nil ) + (!pair "length" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1097) + nil ) + (!pair "vector_cst" + (!type struct 29 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-core.h:1100" + (!srcfileloc "tree-core.h" 1109) + (!fields 3 + (!pair "log2_npatterns" + (!type 
already_seen 2) + (!srcfileloc "tree-core.h" 1102) + nil ) + (!pair "nelts_per_pattern" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1105) + nil ) + (!pair "unused" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1108) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1109) + nil ) + (!pair "version" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1112) + nil ) + (!pair "chrec_var" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1115) + nil ) + (!pair "ifn" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1118) + nil ) + (!pair "omp_atomic_memory_order" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1121) + nil ) + (!pair "dependence_info" + (!type struct 30 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-core.h:1130" + (!srcfileloc "tree-core.h" 1133) + (!fields 2 + (!pair "clique" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1131) + nil ) + (!pair "base" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1132) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1133) + nil ) + ) + nil 1023 nil ) + (!srcfileloc "tree-core.h" 1134) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2043) + (!options + (!option tag string "TS_BASE") + ) + ) + (!pair "typed" + (!type struct 31 nil gc_used "tree_typed" + (!srcfileloc "tree-core.h" 1449) + (!fields 2 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "tree-core.h" 1447) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1448) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2044) + (!options + (!option tag string "TS_TYPED") + ) + ) + (!pair "common" + (!type struct 32 nil gc_used "tree_common" + (!srcfileloc "tree-core.h" 1454) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1452) + nil ) + (!pair "chain" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1453) + nil ) + ) + 
nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2045) + (!options + (!option tag string "TS_COMMON") + ) + ) + (!pair "int_cst" + (!type struct 33 nil gc_used "tree_int_cst" + (!srcfileloc "tree-core.h" 1459) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1457) + nil ) + (!pair "val" + (!type array 34 nil gc_used "1" + (!type already_seen 2) + ) + (!srcfileloc "tree-core.h" 1458) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2046) + (!options + (!option tag string "TS_INT_CST") + ) + ) + (!pair "poly_int_cst" + (!type struct 35 nil gc_used "tree_poly_int_cst" + (!srcfileloc "tree-core.h" 1492) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1490) + nil ) + (!pair "coeffs" + (!type array 36 nil gc_used "NUM_POLY_INT_COEFFS" + (!type already_seen 23) + ) + (!srcfileloc "tree-core.h" 1491) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2047) + (!options + (!option tag string "TS_POLY_INT_CST") + ) + ) + (!pair "real_cst" + (!type struct 37 nil gc_used "tree_real_cst" + (!srcfileloc "tree-core.h" 1465) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1463) + nil ) + (!pair "real_cst_ptr" + (!type pointer 38 nil gc_used + (!type struct 39 + (!type already_seen 38) + gc_pointed_to "real_value" + (!srcfileloc "real.h" 57) + (!fields 7 + (!pair "cl" + (!type already_seen 2) + (!srcfileloc "real.h" 43) + nil ) + (!pair "decimal" + (!type already_seen 2) + (!srcfileloc "real.h" 45) + nil ) + (!pair "sign" + (!type already_seen 2) + (!srcfileloc "real.h" 47) + nil ) + (!pair "signalling" + (!type already_seen 2) + (!srcfileloc "real.h" 49) + nil ) + (!pair "canonical" + (!type already_seen 2) + (!srcfileloc "real.h" 52) + nil ) + (!pair "uexp" + (!type already_seen 2) + (!srcfileloc "real.h" 54) + nil ) + (!pair "sig" + (!type array 40 nil gc_used "SIGSZ" + (!type already_seen 2) + ) + (!srcfileloc "real.h" 56) + nil ) + ) + nil 1023 
nil nil ) + ) + (!srcfileloc "tree-core.h" 1464) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2048) + (!options + (!option tag string "TS_REAL_CST") + ) + ) + (!pair "fixed_cst" + (!type struct 41 nil gc_used "tree_fixed_cst" + (!srcfileloc "tree-core.h" 1470) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1468) + nil ) + (!pair "fixed_cst_ptr" + (!type pointer 42 nil gc_used + (!type struct 43 + (!type already_seen 42) + gc_pointed_to "fixed_value" + (!srcfileloc "fixed-value.h" 27) + (!fields 2 + (!pair "data" + (!type already_seen 2) + (!srcfileloc "fixed-value.h" 25) + nil ) + (!pair "mode" + (!type user_struct 44 nil gc_used "pod_mode" + (!srcfileloc "coretypes.h" 69) + (!fields 1 + (!pair "scalar_mode" + (!type struct 45 nil gc_used "scalar_mode" + (!srcfileloc "coretypes.h" 65) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "coretypes.h" 69) + nil ) + ) + ) + (!srcfileloc "fixed-value.h" 26) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 1469) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2049) + (!options + (!option tag string "TS_FIXED_CST") + ) + ) + (!pair "vector" + (!type struct 46 nil gc_used "tree_vector" + (!srcfileloc "tree-core.h" 1487) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1485) + nil ) + (!pair "elts" + (!type array 47 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "tree-core.h" 1486) + (!options + (!option length string "vector_cst_encoded_nelts ((tree) &%h)") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2050) + (!options + (!option tag string "TS_VECTOR") + ) + ) + (!pair "string" + (!type struct 48 nil gc_used "tree_string" + (!srcfileloc "tree-core.h" 1476) + (!fields 3 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1473) + nil ) + (!pair "length" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1474) + nil ) + (!pair "str" + (!type array 49 
nil gc_used "1" + (!type already_seen 8) + ) + (!srcfileloc "tree-core.h" 1475) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2051) + (!options + (!option tag string "TS_STRING") + ) + ) + (!pair "complex" + (!type struct 50 nil gc_used "tree_complex" + (!srcfileloc "tree-core.h" 1482) + (!fields 3 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1479) + nil ) + (!pair "real" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1480) + nil ) + (!pair "imag" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1481) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2052) + (!options + (!option tag string "TS_COMPLEX") + ) + ) + (!pair "identifier" + (!type struct 51 nil gc_used "tree_identifier" + (!srcfileloc "tree-core.h" 1497) + (!fields 2 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1495) + nil ) + (!pair "id" + (!type already_seen 15) + (!srcfileloc "tree-core.h" 1496) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2053) + (!options + (!option tag string "TS_IDENTIFIER") + ) + ) + (!pair "decl_minimal" + (!type struct 52 nil gc_used "tree_decl_minimal" + (!srcfileloc "tree-core.h" 1755) + (!fields 5 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1750) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1751) + nil ) + (!pair "uid" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1752) + nil ) + (!pair "name" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1753) + nil ) + (!pair "context" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1754) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2054) + (!options + (!option tag string "TS_DECL_MINIMAL") + ) + ) + (!pair "decl_common" + (!type struct 53 nil gc_used "tree_decl_common" + (!srcfileloc "tree-core.h" 1828) + (!fields 36 + (!pair "common" + (!type already_seen 52) + (!srcfileloc "tree-core.h" 1758) + nil ) + (!pair "size" + 
(!type already_seen 23) + (!srcfileloc "tree-core.h" 1759) + nil ) + (!pair "mode" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1761) + nil ) + (!pair "nonlocal_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1763) + nil ) + (!pair "virtual_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1764) + nil ) + (!pair "ignored_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1765) + nil ) + (!pair "abstract_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1766) + nil ) + (!pair "artificial_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1767) + nil ) + (!pair "preserve_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1768) + nil ) + (!pair "debug_expr_is_from" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1769) + nil ) + (!pair "lang_flag_0" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1771) + nil ) + (!pair "lang_flag_1" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1772) + nil ) + (!pair "lang_flag_2" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1773) + nil ) + (!pair "lang_flag_3" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1774) + nil ) + (!pair "lang_flag_4" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1775) + nil ) + (!pair "lang_flag_5" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1776) + nil ) + (!pair "lang_flag_6" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1777) + nil ) + (!pair "lang_flag_7" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1778) + nil ) + (!pair "lang_flag_8" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1779) + nil ) + (!pair "decl_flag_0" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1784) + nil ) + (!pair "decl_flag_1" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1788) + nil ) + (!pair "decl_flag_2" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1792) + nil ) + (!pair "decl_flag_3" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1794) + nil ) + (!pair 
"not_gimple_reg_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1797) + nil ) + (!pair "decl_by_reference_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1799) + nil ) + (!pair "decl_read_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1801) + nil ) + (!pair "decl_nonshareable_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1804) + nil ) + (!pair "off_align" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1807) + nil ) + (!pair "align" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1810) + nil ) + (!pair "warn_if_not_align" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1814) + nil ) + (!pair "pt_uid" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1819) + nil ) + (!pair "size_unit" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1821) + nil ) + (!pair "initial" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1822) + nil ) + (!pair "attributes" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1823) + nil ) + (!pair "abstract_origin" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1824) + nil ) + (!pair "lang_specific" + (!type pointer 54 nil gc_used + (!type lang_struct 55 + (!type already_seen 54) + gc_pointed_to "lang_decl" + (!srcfileloc "ada/gcc-interface/ada-tree.h" 37) + (!fields 0 ) + nil 1023 + (!homotypes 8 + (!type struct 56 nil gc_pointed_to "lang_decl" + (!srcfileloc "lto/lto-tree.h" 32) + (!fields 1 + (!pair "dummy" + (!type already_seen 2) + (!srcfileloc "lto/lto-tree.h" 31) + nil ) + ) + nil 128 + (!type already_seen 55) + nil ) + + (!type struct 57 nil gc_pointed_to "lang_decl" + (!srcfileloc "jit/dummy-frontend.cc" 497) + (!fields 1 + (!pair "dummy" + (!type already_seen 8) + (!srcfileloc "jit/dummy-frontend.cc" 496) + nil ) + ) + (!options + (!option variable_size string "") + ) + 64 + (!type already_seen 55) + nil ) + + (!type struct 58 nil gc_pointed_to "lang_decl" + (!srcfileloc "go/go-lang.cc" 58) + (!fields 1 + (!pair "dummy" + (!type 
already_seen 8) + (!srcfileloc "go/go-lang.cc" 57) + nil ) + ) + nil 32 + (!type already_seen 55) + nil ) + + (!type struct 59 nil gc_pointed_to "lang_decl" + (!srcfileloc "fortran/trans.h" 1028) + (!fields 9 + (!pair "saved_descriptor" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1015) + nil ) + (!pair "stringlen" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1020) + nil ) + (!pair "addr" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1021) + nil ) + (!pair "token" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1023) + nil ) + (!pair "caf_offset" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1023) + nil ) + (!pair "scalar_allocatable" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 1024) + nil ) + (!pair "scalar_pointer" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 1025) + nil ) + (!pair "scalar_target" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 1026) + nil ) + (!pair "optional_arg" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 1027) + nil ) + ) + nil 16 + (!type already_seen 55) + nil ) + + (!type struct 60 nil gc_pointed_to "lang_decl" + (!srcfileloc "d/d-tree.h" 277) + (!fields 7 + (!pair "decl" + (!type pointer 61 nil gc_unused + (!type struct 62 + (!type already_seen 61) + gc_unused "Declaration" + (!srcfileloc "d/d-tree.h" 212) + (!fields 0 ) + nil 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 258) + (!options + (!option skip string "") + ) + ) + (!pair "frame_field" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 261) + nil ) + (!pair "named_result" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 264) + nil ) + (!pair "thunks" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 267) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "d/d-tree.h" 270) + nil ) + (!pair "intrinsic" + (!type already_seen 2) + (!srcfileloc "d/d-tree.h" 273) + nil ) + (!pair "frame_info" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 
276) + nil ) + ) + nil 8 + (!type already_seen 55) + nil ) + + (!type struct 63 nil gc_pointed_to "lang_decl" + (!srcfileloc "cp/cp-tree.h" 2995) + (!fields 1 + (!pair "u" + (!type union 64 nil gc_used "lang_decl_u" + (!srcfileloc "cp/cp-tree.h" 2994) + (!fields 6 + (!pair "base" + (!type struct 65 nil gc_used "lang_decl_base" + (!srcfileloc "cp/cp-tree.h" 2852) + (!fields 17 + (!pair "selector" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2818) + nil ) + (!pair "language" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2819) + nil ) + (!pair "use_template" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2820) + nil ) + (!pair "not_really_extern" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2821) + nil ) + (!pair "initialized_in_class" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2822) + nil ) + (!pair "threadprivate_or_deleted_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2824) + nil ) + (!pair "anticipated_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2828) + nil ) + (!pair "friend_or_tls" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2829) + nil ) + (!pair "unknown_bound_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2830) + nil ) + (!pair "odr_used" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2831) + nil ) + (!pair "concept_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2832) + nil ) + (!pair "var_declared_inline_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2833) + nil ) + (!pair "dependent_init_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2834) + nil ) + (!pair "module_purview_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2844) + nil ) + (!pair "module_import_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2845) + nil ) + (!pair "module_entity_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2846) + nil ) + (!pair "module_attached_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2849) + 
nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2988) + (!options + (!option default string "") + ) + ) + (!pair "min" + (!type struct 66 nil gc_used "lang_decl_min" + (!srcfileloc "cp/cp-tree.h" 2883) + (!fields 3 + (!pair "base" + (!type already_seen 65) + (!srcfileloc "cp/cp-tree.h" 2867) + nil ) + (!pair "template_info" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2874) + nil ) + (!pair "access" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2882) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2989) + (!options + (!option tag string "lds_min") + ) + ) + (!pair "fn" + (!type struct 67 nil gc_used "lang_decl_fn" + (!srcfileloc "cp/cp-tree.h" 2947) + (!fields 24 + (!pair "min" + (!type already_seen 66) + (!srcfileloc "cp/cp-tree.h" 2888) + nil ) + (!pair "ovl_op_code" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2891) + nil ) + (!pair "global_ctor_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2892) + nil ) + (!pair "global_dtor_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2893) + nil ) + (!pair "static_function" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2895) + nil ) + (!pair "pure_virtual" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2896) + nil ) + (!pair "defaulted_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2897) + nil ) + (!pair "has_in_charge_parm_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2898) + nil ) + (!pair "has_vtt_parm_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2899) + nil ) + (!pair "pending_inline_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2900) + nil ) + (!pair "nonconverting" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2901) + nil ) + (!pair "thunk_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2902) + nil ) + (!pair "this_thunk_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2904) + nil ) + (!pair "omp_declare_reduction_p" + (!type already_seen 2) + 
(!srcfileloc "cp/cp-tree.h" 2905) + nil ) + (!pair "has_dependent_explicit_spec_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2906) + nil ) + (!pair "immediate_fn_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2907) + nil ) + (!pair "maybe_deleted" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2908) + nil ) + (!pair "coroutine_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2909) + nil ) + (!pair "implicit_constexpr" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2910) + nil ) + (!pair "spare" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2912) + nil ) + (!pair "befriending_classes" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2919) + nil ) + (!pair "context" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2929) + nil ) + (!pair "u5" + (!type union 68 nil gc_used "lang_decl_u5" + (!srcfileloc "cp/cp-tree.h" 2939) + (!fields 2 + (!pair "cloned_function" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2934) + (!options + (!option tag string "0") + ) + ) + (!pair "fixed_offset" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2938) + (!options + (!option tag string "1") + ) + ) + ) + nil 516 nil ) + (!srcfileloc "cp/cp-tree.h" 2939) + (!options + (!option desc string "%1.thunk_p") + ) + ) + (!pair "u" + (!type union 69 nil gc_used "lang_decl_u3" + (!srcfileloc "cp/cp-tree.h" 2945) + (!fields 2 + (!pair "pending_inline_info" + (!type pointer 70 nil gc_used + (!type struct 71 + (!type already_seen 70) + gc_pointed_to "cp_token_cache" + (!srcfileloc "cp/parser.h" 141) + (!fields 2 + (!pair "first" + (!type pointer 72 nil gc_unused + (!type struct 73 + (!type already_seen 72) + gc_used "cp_token" + (!srcfileloc "cp/parser.h" 87) + (!fields 10 + (!pair "type" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 44) + nil ) + (!pair "keyword" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 47) + nil ) + (!pair "flags" + (!type already_seen 8) + (!srcfileloc "cp/parser.h" 49) + 
nil ) + (!pair "implicit_extern_c" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 51) + nil ) + (!pair "error_reported" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 55) + nil ) + (!pair "purged_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 59) + nil ) + (!pair "tree_check_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 60) + nil ) + (!pair "main_source_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 61) + nil ) + (!pair "location" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 65) + nil ) + (!pair "u" + (!type union 74 nil gc_used "cp_token_value" + (!srcfileloc "cp/parser.h" 72) + (!fields 2 + (!pair "tree_check_value" + (!type pointer 75 nil gc_used + (!type struct 76 + (!type already_seen 75) + gc_pointed_to "tree_check" + (!srcfileloc "cp/parser.h" 38) + (!fields 3 + (!pair "value" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 32) + nil ) + (!pair "checks" + (!type pointer 77 nil gc_used + (!type user_struct 78 + (!type already_seen 77) + gc_pointed_to "vec" + (!srcfileloc "cp/cp-tree.h" 1560) + (!fields 2 + (!pair "va_gc" + (!type undefined 79 nil gc_unused "va_gc" + (!srcfileloc "rtl.h" 267) + ) + (!srcfileloc "cp/cp-tree.h" 1560) + nil ) + (!pair "deferred_access_check" + (!type struct 80 nil gc_used "deferred_access_check" + (!srcfileloc "cp/cp-tree.h" 1560) + (!fields 4 + (!pair "binfo" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1547) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1549) + nil ) + (!pair "diag_decl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1551) + nil ) + (!pair "loc" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1553) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1560) + nil ) + ) + ) + ) + (!srcfileloc "cp/parser.h" 34) + nil ) + (!pair "qualifying_scope" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 37) + nil ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/parser.h" 69) + (!options + 
(!option tag string "true") + ) + ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 71) + (!options + (!option tag string "false") + ) + ) + ) + nil 516 nil ) + (!srcfileloc "cp/parser.h" 72) + (!options + (!option desc string "%1.tree_check_p") + ) + ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/parser.h" 135) + (!options + (!option skip string "") + ) + ) + (!pair "last" + (!type already_seen 72) + (!srcfileloc "cp/parser.h" 138) + (!options + (!option skip string "") + ) + ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/cp-tree.h" 2943) + (!options + (!option tag string "1") + ) + ) + (!pair "saved_auto_return_type" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2944) + (!options + (!option tag string "0") + ) + ) + ) + nil 516 nil ) + (!srcfileloc "cp/cp-tree.h" 2945) + (!options + (!option desc string "%1.pending_inline_p") + ) + ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2990) + (!options + (!option tag string "lds_fn") + ) + ) + (!pair "ns" + (!type struct 81 nil gc_used "lang_decl_ns" + (!srcfileloc "cp/cp-tree.h" 2962) + (!fields 4 + (!pair "base" + (!type already_seen 65) + (!srcfileloc "cp/cp-tree.h" 2952) + nil ) + (!pair "level" + (!type pointer 82 nil gc_used + (!type struct 83 + (!type already_seen 82) + gc_pointed_to "cp_binding_level" + (!srcfileloc "cp/name-lookup.h" 316) + (!fields 17 + (!pair "names" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 250) + nil ) + (!pair "using_directives" + (!type pointer 84 nil gc_used + (!type user_struct 85 + (!type already_seen 84) + gc_pointed_to "vec" + (!srcfileloc "tree-core.h" 1653) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "tree-core.h" 1653) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1653) + nil ) + ) + ) + ) + (!srcfileloc "cp/name-lookup.h" 253) + nil ) + (!pair "class_shadowed" + (!type pointer 86 nil gc_used + (!type user_struct 87 + (!type already_seen 86) + gc_pointed_to 
"vec" + (!srcfileloc "cp/name-lookup.h" 257) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/name-lookup.h" 257) + nil ) + (!pair "cp_class_binding" + (!type struct 88 nil gc_used "cp_class_binding" + (!srcfileloc "cp/name-lookup.h" 257) + (!fields 2 + (!pair "base" + (!type pointer 89 nil gc_used + (!type struct 90 + (!type already_seen 89) + gc_pointed_to "cxx_binding" + (!srcfileloc "cp/name-lookup.h" 59) + (!fields 7 + (!pair "previous" + (!type already_seen 89) + (!srcfileloc "cp/name-lookup.h" 48) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 50) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 52) + nil ) + (!pair "scope" + (!type already_seen 82) + (!srcfileloc "cp/name-lookup.h" 54) + nil ) + (!pair "value_is_inherited" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 56) + nil ) + (!pair "is_local" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 57) + nil ) + (!pair "type_is_hidden" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 58) + nil ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/name-lookup.h" 219) + nil ) + (!pair "identifier" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 221) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/name-lookup.h" 257) + nil ) + ) + ) + ) + (!srcfileloc "cp/name-lookup.h" 257) + nil ) + (!pair "type_shadowed" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 264) + nil ) + (!pair "blocks" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 269) + nil ) + (!pair "this_entity" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 273) + nil ) + (!pair "level_chain" + (!type already_seen 82) + (!srcfileloc "cp/name-lookup.h" 276) + nil ) + (!pair "statement_list" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 280) + nil ) + (!pair "binding_depth" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 283) + nil ) + (!pair "kind" + 
(!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 288) + nil ) + (!pair "explicit_spec_p" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 292) + nil ) + (!pair "keep" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 295) + nil ) + (!pair "more_cleanups_ok" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 299) + nil ) + (!pair "have_cleanups" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 300) + nil ) + (!pair "defining_class_p" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 307) + nil ) + (!pair "immediate_fn_ctx_p" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 310) + nil ) + (!pair "requires_expression" + (!type already_seen 2) + (!srcfileloc "cp/name-lookup.h" 313) + nil ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/cp-tree.h" 2953) + nil ) + (!pair "inlinees" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 2956) + nil ) + (!pair "bindings" + (!type pointer 91 nil gc_used + (!type user_struct 92 + (!type already_seen 91) + gc_pointed_to "hash_table" + (!srcfileloc "cp/cp-tree.h" 2961) + (!fields 1 + (!pair "named_decl_hash" + (!type struct 93 nil gc_used "named_decl_hash" + (!srcfileloc "cp/cp-tree.h" 2961) + (!fields 0 ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2961) + nil ) + ) + ) + ) + (!srcfileloc "cp/cp-tree.h" 2961) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2991) + (!options + (!option tag string "lds_ns") + ) + ) + (!pair "parm" + (!type struct 94 nil gc_used "lang_decl_parm" + (!srcfileloc "cp/cp-tree.h" 2970) + (!fields 3 + (!pair "base" + (!type already_seen 65) + (!srcfileloc "cp/cp-tree.h" 2967) + nil ) + (!pair "level" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2968) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2969) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2992) + (!options + (!option tag string "lds_parm") + ) + ) + (!pair "decomp" + (!type struct 95 nil gc_used 
"lang_decl_decomp" + (!srcfileloc "cp/cp-tree.h" 2979) + (!fields 2 + (!pair "min" + (!type already_seen 66) + (!srcfileloc "cp/cp-tree.h" 2975) + nil ) + (!pair "base" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2978) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2993) + (!options + (!option tag string "lds_decomp") + ) + ) + ) + (!options + (!option desc string "%h.base.selector") + ) + 516 nil ) + (!srcfileloc "cp/cp-tree.h" 2994) + nil ) + ) + nil 516 + (!type already_seen 55) + nil ) + + (!type struct 96 nil gc_pointed_to "lang_decl" + (!srcfileloc "c/c-lang.h" 46) + (!fields 1 + (!pair "dummy" + (!type already_seen 8) + (!srcfileloc "c/c-lang.h" 45) + nil ) + ) + nil 258 + (!type already_seen 55) + nil ) + + (!type struct 97 nil gc_pointed_to "lang_decl" + (!srcfileloc "ada/gcc-interface/ada-tree.h" 37) + (!fields 1 + (!pair "t" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/ada-tree.h" 37) + nil ) + ) + nil 1 + (!type already_seen 55) + nil ) + ) + ) + ) + (!srcfileloc "tree-core.h" 1827) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2055) + (!options + (!option tag string "TS_DECL_COMMON") + ) + ) + (!pair "decl_with_rtl" + (!type struct 98 nil gc_used "tree_decl_with_rtl" + (!srcfileloc "tree-core.h" 1833) + (!fields 2 + (!pair "common" + (!type already_seen 53) + (!srcfileloc "tree-core.h" 1831) + nil ) + (!pair "rtl" + (!type pointer 99 + (!type pointer 100 nil gc_used + (!type already_seen 99) + ) + gc_pointed_to + (!type struct 101 + (!type already_seen 99) + gc_pointed_to "rtx_def" + (!srcfileloc "rtl.h" 453) + (!fields 12 + (!pair "code" + (!type already_seen 2) + (!srcfileloc "rtl.h" 313) + nil ) + (!pair "mode" + (!type already_seen 2) + (!srcfileloc "rtl.h" 316) + nil ) + (!pair "jump" + (!type already_seen 2) + (!srcfileloc "rtl.h" 329) + nil ) + (!pair "call" + (!type already_seen 2) + (!srcfileloc "rtl.h" 336) + nil ) + (!pair "unchanging" + (!type already_seen 2) + (!srcfileloc "rtl.h" 
348) + nil ) + (!pair "volatil" + (!type already_seen 2) + (!srcfileloc "rtl.h" 362) + nil ) + (!pair "in_struct" + (!type already_seen 2) + (!srcfileloc "rtl.h" 377) + nil ) + (!pair "used" + (!type already_seen 2) + (!srcfileloc "rtl.h" 386) + nil ) + (!pair "frame_related" + (!type already_seen 2) + (!srcfileloc "rtl.h" 395) + nil ) + (!pair "return_val" + (!type already_seen 2) + (!srcfileloc "rtl.h" 402) + nil ) + (!pair "u2" + (!type union 102 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/rtl.h:404" + (!srcfileloc "rtl.h" 438) + (!fields 6 + (!pair "original_regno" + (!type already_seen 2) + (!srcfileloc "rtl.h" 411) + nil ) + (!pair "insn_uid" + (!type already_seen 2) + (!srcfileloc "rtl.h" 414) + nil ) + (!pair "symbol_ref_flags" + (!type already_seen 2) + (!srcfileloc "rtl.h" 417) + nil ) + (!pair "var_location_status" + (!type already_seen 2) + (!srcfileloc "rtl.h" 420) + nil ) + (!pair "num_elem" + (!type already_seen 2) + (!srcfileloc "rtl.h" 424) + nil ) + (!pair "const_vector" + (!type struct 103 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/rtl.h:428" + (!srcfileloc "rtl.h" 437) + (!fields 3 + (!pair "npatterns" + (!type already_seen 2) + (!srcfileloc "rtl.h" 430) + nil ) + (!pair "nelts_per_pattern" + (!type already_seen 2) + (!srcfileloc "rtl.h" 433) + nil ) + (!pair "unused" + (!type already_seen 2) + (!srcfileloc "rtl.h" 436) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "rtl.h" 437) + nil ) + ) + nil 1023 nil ) + (!srcfileloc "rtl.h" 438) + (!options + (!option skip string "") + ) + ) + (!pair "u" + (!type union 104 nil gc_used "rtx_def_subunion" + (!srcfileloc "rtl.h" 452) + (!fields 152 + (!pair "" + (!type struct 105 nil gc_used "rtx_def_debug_marker" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "DEBUG_MARKER") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 106 nil gc_used 
"rtx_def_debug_parameter_ref" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "DEBUG_PARAMETER_REF") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 107 nil gc_used "rtx_def_entry_value" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ENTRY_VALUE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 108 nil gc_used "rtx_def_debug_implicit_ptr" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "DEBUG_IMPLICIT_PTR") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 109 nil gc_used "rtx_def_var_location" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VAR_LOCATION") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 110 nil gc_used "rtx_def_fma" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option 
dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "FMA") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 111 nil gc_used "rtx_def_us_truncate" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "US_TRUNCATE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 112 nil gc_used "rtx_def_ss_truncate" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_TRUNCATE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 113 nil gc_used "rtx_def_us_minus" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "US_MINUS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 114 nil gc_used "rtx_def_us_ashift" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + 
(!options + (!option tag string "US_ASHIFT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 115 nil gc_used "rtx_def_ss_ashift" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_ASHIFT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 116 nil gc_used "rtx_def_ss_abs" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_ABS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 117 nil gc_used "rtx_def_us_neg" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "US_NEG") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 118 nil gc_used "rtx_def_ss_neg" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_NEG") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 119 nil gc_used "rtx_def_ss_minus" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) 
+ (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_MINUS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 120 nil gc_used "rtx_def_us_plus" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "US_PLUS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 121 nil gc_used "rtx_def_ss_plus" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_PLUS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 122 nil gc_used "rtx_def_vec_series" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VEC_SERIES") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 123 nil gc_used "rtx_def_vec_duplicate" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + 
nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VEC_DUPLICATE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 124 nil gc_used "rtx_def_vec_concat" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VEC_CONCAT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 125 nil gc_used "rtx_def_vec_select" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VEC_SELECT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 126 nil gc_used "rtx_def_vec_merge" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VEC_MERGE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 127 nil gc_used "rtx_def_lo_sum" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 
1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LO_SUM") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 128 nil gc_used "rtx_def_high" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "HIGH") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 129 nil gc_used "rtx_def_zero_extract" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ZERO_EXTRACT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 130 nil gc_used "rtx_def_sign_extract" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SIGN_EXTRACT") + (!option dot string "") + ) + ) + 
(!pair "" + (!type struct 131 nil gc_used "rtx_def_parity" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PARITY") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 132 nil gc_used "rtx_def_popcount" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "POPCOUNT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 133 nil gc_used "rtx_def_ctz" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CTZ") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 134 nil gc_used "rtx_def_clz" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CLZ") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 135 nil gc_used "rtx_def_clrsb" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CLRSB") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 136 nil gc_used "rtx_def_ffs" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 
99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "FFS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 137 nil gc_used "rtx_def_bswap" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "BSWAP") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 138 nil gc_used "rtx_def_sqrt" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SQRT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 139 nil gc_used "rtx_def_abs" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ABS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 140 nil gc_used "rtx_def_unsigned_sat_fract" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNSIGNED_SAT_FRACT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 141 nil gc_used "rtx_def_sat_fract" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + 
(!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SAT_FRACT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 142 nil gc_used "rtx_def_unsigned_fract_convert" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNSIGNED_FRACT_CONVERT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 143 nil gc_used "rtx_def_fract_convert" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "FRACT_CONVERT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 144 nil gc_used "rtx_def_unsigned_fix" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNSIGNED_FIX") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 145 nil gc_used "rtx_def_unsigned_float" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNSIGNED_FLOAT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 146 nil gc_used "rtx_def_fix" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag 
string "FIX") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 147 nil gc_used "rtx_def_float" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "FLOAT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 148 nil gc_used "rtx_def_float_truncate" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "FLOAT_TRUNCATE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 149 nil gc_used "rtx_def_float_extend" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "FLOAT_EXTEND") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 150 nil gc_used "rtx_def_truncate" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "TRUNCATE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 151 nil gc_used "rtx_def_zero_extend" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ZERO_EXTEND") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 152 nil gc_used 
"rtx_def_sign_extend" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SIGN_EXTEND") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 153 nil gc_used "rtx_def_unlt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNLT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 154 nil gc_used "rtx_def_unle" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNLE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 155 nil gc_used "rtx_def_ungt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNGT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 156 nil gc_used "rtx_def_unge" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + 
(!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNGE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 157 nil gc_used "rtx_def_uneq" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNEQ") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 158 nil gc_used "rtx_def_ordered" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ORDERED") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 159 nil gc_used "rtx_def_unordered" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNORDERED") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 160 nil gc_used "rtx_def_ltu" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + 
(!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LTU") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 161 nil gc_used "rtx_def_leu" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LEU") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 162 nil gc_used "rtx_def_gtu" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "GTU") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 163 nil gc_used "rtx_def_geu" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "GEU") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 164 nil gc_used "rtx_def_ltgt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" 
+ (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LTGT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 165 nil gc_used "rtx_def_lt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 166 nil gc_used "rtx_def_le" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 167 nil gc_used "rtx_def_gt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "GT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 168 nil gc_used "rtx_def_ge" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + 
(!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "GE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 169 nil gc_used "rtx_def_eq" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "EQ") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 170 nil gc_used "rtx_def_ne" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "NE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 171 nil gc_used "rtx_def_post_modify" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "POST_MODIFY") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 172 nil gc_used "rtx_def_pre_modify" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair 
".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PRE_MODIFY") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 173 nil gc_used "rtx_def_post_inc" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "POST_INC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 174 nil gc_used "rtx_def_post_dec" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "POST_DEC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 175 nil gc_used "rtx_def_pre_inc" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PRE_INC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 176 nil gc_used "rtx_def_pre_dec" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PRE_DEC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 177 nil gc_used "rtx_def_umax" + (!srcfileloc "rtl.h" 452) + 
(!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UMAX") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 178 nil gc_used "rtx_def_umin" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UMIN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 179 nil gc_used "rtx_def_smax" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SMAX") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 180 nil gc_used "rtx_def_smin" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SMIN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 181 nil gc_used "rtx_def_rotatert" + (!srcfileloc 
"rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ROTATERT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 182 nil gc_used "rtx_def_lshiftrt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LSHIFTRT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 183 nil gc_used "rtx_def_ashiftrt" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ASHIFTRT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 184 nil gc_used "rtx_def_rotate" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ROTATE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 185 nil 
gc_used "rtx_def_ashift" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ASHIFT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 186 nil gc_used "rtx_def_not" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "NOT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 187 nil gc_used "rtx_def_xor" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "XOR") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 188 nil gc_used "rtx_def_ior" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "IOR") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 189 nil gc_used "rtx_def_and" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + 
(!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "AND") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 190 nil gc_used "rtx_def_umod" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UMOD") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 191 nil gc_used "rtx_def_udiv" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UDIV") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 192 nil gc_used "rtx_def_mod" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "MOD") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 193 nil gc_used "rtx_def_us_div" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type 
already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "US_DIV") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 194 nil gc_used "rtx_def_ss_div" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_DIV") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 195 nil gc_used "rtx_def_div" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "DIV") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 196 nil gc_used "rtx_def_umul_highpart" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UMUL_HIGHPART") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 197 nil gc_used "rtx_def_smul_highpart" + (!srcfileloc "rtl.h" 452) + 
(!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SMUL_HIGHPART") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 198 nil gc_used "rtx_def_us_mult" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "US_MULT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 199 nil gc_used "rtx_def_ss_mult" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SS_MULT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 200 nil gc_used "rtx_def_mult" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "MULT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 201 nil gc_used "rtx_def_neg" + 
(!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "NEG") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 202 nil gc_used "rtx_def_minus" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "MINUS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 203 nil gc_used "rtx_def_plus" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PLUS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 204 nil gc_used "rtx_def_compare" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "COMPARE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 205 nil gc_used "rtx_def_if_then_else" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc 
"gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "IF_THEN_ELSE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 206 nil gc_used "rtx_def_symbol_ref" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair "block_sym" + (!type union 207 nil gc_used "fake_union_1" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair "" + (!type struct 208 nil gc_used "block_symbol" + (!srcfileloc "rtl.h" 245) + (!fields 3 + (!pair "fld" + (!type array 209 nil gc_unused "2" + (!type undefined 210 nil gc_unused "rtunion" + (!srcfileloc "rtl.h" 237) + ) + ) + (!srcfileloc "rtl.h" 237) + (!options + (!option skip string "") + ) + ) + (!pair "block" + (!type pointer 211 nil gc_used + (!type struct 212 + (!type already_seen 211) + gc_pointed_to "object_block" + (!srcfileloc "rtl.h" 278) + (!fields 5 + (!pair "sect" + (!type pointer 213 nil gc_used + (!type union 214 + (!type already_seen 213) + gc_pointed_to "section" + (!srcfileloc "output.h" 515) + (!fields 4 + (!pair "common" + (!type struct 215 nil gc_used "section_common" + (!srcfileloc "output.h" 457) + (!fields 1 + (!pair "flags" + (!type already_seen 2) + (!srcfileloc "output.h" 456) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "output.h" 511) + (!options + (!option skip string "") + ) + ) + (!pair "named" + (!type struct 216 nil gc_used "named_section" + (!srcfileloc "output.h" 469) + (!fields 3 + (!pair "common" + (!type already_seen 215) + (!srcfileloc "output.h" 461) + nil ) + (!pair "name" + (!type already_seen 11) + (!srcfileloc "output.h" 464) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "output.h" 468) + nil ) + ) + nil 1023 
nil nil ) + (!srcfileloc "output.h" 512) + (!options + (!option tag string "SECTION_NAMED") + ) + ) + (!pair "unnamed" + (!type struct 217 nil gc_used "unnamed_section" + (!srcfileloc "output.h" 486) + (!fields 4 + (!pair "common" + (!type already_seen 215) + (!srcfileloc "output.h" 477) + nil ) + (!pair "callback" + (!type callback 218 nil gc_used) + (!srcfileloc "output.h" 481) + (!options + (!option callback string "") + ) + ) + (!pair "data" + (!type already_seen 11) + (!srcfileloc "output.h" 482) + nil ) + (!pair "next" + (!type already_seen 213) + (!srcfileloc "output.h" 485) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "output.h" 513) + (!options + (!option tag string "SECTION_UNNAMED") + ) + ) + (!pair "noswitch" + (!type struct 219 nil gc_used "noswitch_section" + (!srcfileloc "output.h" 507) + (!fields 2 + (!pair "common" + (!type already_seen 215) + (!srcfileloc "output.h" 503) + nil ) + (!pair "callback" + (!type already_seen 218) + (!srcfileloc "output.h" 506) + (!options + (!option callback string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "output.h" 514) + (!options + (!option tag string "SECTION_NOSWITCH") + ) + ) + ) + (!options + (!option for_user string "") + (!option desc string "SECTION_STYLE (&(%h))") + ) + 1023 nil ) + ) + (!srcfileloc "rtl.h" 251) + nil ) + (!pair "alignment" + (!type already_seen 2) + (!srcfileloc "rtl.h" 254) + nil ) + (!pair "size" + (!type already_seen 2) + (!srcfileloc "rtl.h" 257) + nil ) + (!pair "objects" + (!type pointer 220 nil gc_used + (!type user_struct 221 + (!type already_seen 220) + gc_pointed_to "vec" + (!srcfileloc "rtl.h" 267) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "rtl.h" 267) + nil ) + (!pair "rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 267) + nil ) + ) + ) + ) + (!srcfileloc "rtl.h" 267) + nil ) + (!pair "anchors" + (!type already_seen 220) + (!srcfileloc "rtl.h" 277) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + 
(!srcfileloc "rtl.h" 240) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "rtl.h" 244) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1025) + (!options + (!option tag string "1") + (!option dot string "") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "gengtype.cc" 1369) + (!options + (!option desc string "SYMBOL_REF_HAS_BLOCK_INFO_P (&%0)") + ) + ) + (!pair ".fld[1]." + (!type union 222 nil gc_used "rtx_def_symbol_subunion" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair "rt_constant" + (!type pointer 223 nil gc_used + (!type struct 224 + (!type already_seen 223) + gc_pointed_to "constant_descriptor_rtx" + (!srcfileloc "varasm.cc" 3740) + (!fields 10 + (!pair "next" + (!type already_seen 223) + (!srcfileloc "varasm.cc" 3730) + nil ) + (!pair "mem" + (!type already_seen 99) + (!srcfileloc "varasm.cc" 3731) + nil ) + (!pair "sym" + (!type already_seen 99) + (!srcfileloc "varasm.cc" 3732) + nil ) + (!pair "constant" + (!type already_seen 99) + (!srcfileloc "varasm.cc" 3733) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3734) + nil ) + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3735) + nil ) + (!pair "mode" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3736) + nil ) + (!pair "align" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3737) + nil ) + (!pair "labelno" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3738) + nil ) + (!pair "mark" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3739) + nil ) + ) + (!options + (!option for_user string "") + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "gengtype.cc" 1229) + (!options + (!option tag string "1") + (!option dot string "") + ) + ) + (!pair "rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1227) + (!options + (!option default string "") + (!option dot string "") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option desc string 
"CONSTANT_POOL_ADDRESS_P (&%0)") + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SYMBOL_REF") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 225 nil gc_used "rtx_def_label_ref" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "LABEL_REF") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 226 nil gc_used "rtx_def_mem" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_mem" + (!type pointer 227 nil gc_used + (!type struct 228 + (!type already_seen 227) + gc_pointed_to "mem_attrs" + (!srcfileloc "rtl.h" 184) + (!fields 8 + (!pair "expr" + (!type already_seen 23) + (!srcfileloc "rtl.h" 158) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "rtl.h" 162) + nil ) + (!pair "size" + (!type already_seen 2) + (!srcfileloc "rtl.h" 166) + nil ) + (!pair "alias" + (!type already_seen 2) + (!srcfileloc "rtl.h" 169) + nil ) + (!pair "align" + (!type already_seen 2) + (!srcfileloc "rtl.h" 174) + nil ) + (!pair "addrspace" + (!type already_seen 8) + (!srcfileloc "rtl.h" 177) + nil ) + (!pair "offset_known_p" + (!type already_seen 2) + (!srcfileloc "rtl.h" 180) + nil ) + (!pair "size_known_p" + (!type already_seen 2) + (!srcfileloc "rtl.h" 183) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "MEM") + (!option dot 
string "") + ) + ) + (!pair "" + (!type struct 229 nil gc_used "rtx_def_concatn" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtvec" + (!type pointer 230 nil gc_used + (!type struct 231 + (!type already_seen 230) + gc_pointed_to "rtvec_def" + (!srcfileloc "rtl.h" 738) + (!fields 2 + (!pair "num_elem" + (!type already_seen 2) + (!srcfileloc "rtl.h" 736) + nil ) + (!pair "elem" + (!type array 232 nil gc_used "1" + (!type already_seen 99) + ) + (!srcfileloc "rtl.h" 737) + (!options + (!option length string "%h.num_elem") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONCATN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 233 nil gc_used "rtx_def_concat" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONCAT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 234 nil gc_used "rtx_def_strict_low_part" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "STRICT_LOW_PART") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 235 nil gc_used "rtx_def_subreg" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_subreg" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc 
"gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SUBREG") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 236 nil gc_used "rtx_def_scratch" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SCRATCH") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 237 nil gc_used "rtx_def_reg" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair "reg.attrs" + (!type pointer 238 nil gc_used + (!type struct 239 + (!type already_seen 238) + gc_pointed_to "reg_attrs" + (!srcfileloc "rtl.h" 229) + (!fields 2 + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "rtl.h" 195) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "rtl.h" 196) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "gengtype.cc" 1361) + nil ) + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "REG") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 240 nil gc_used "rtx_def_pc" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 241 nil gc_used "rtx_def_const" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 242 nil gc_used "rtx_def_const_string" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_str" + (!type 
already_seen 11) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST_STRING") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 243 nil gc_used "rtx_def_const_vector" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST_VECTOR") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 244 nil gc_used "rtx_def_const_double" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST_DOUBLE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 245 nil gc_used "rtx_def_const_fixed" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST_FIXED") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 246 nil gc_used "rtx_def_const_poly_int" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string 
"CONST_POLY_INT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 247 nil gc_used "rtx_def_const_wide_int" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST_WIDE_INT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 248 nil gc_used "rtx_def_const_int" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CONST_INT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 249 nil gc_used "rtx_def_trap_if" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "TRAP_IF") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 250 nil gc_used "rtx_def_eh_return" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "EH_RETURN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 251 nil gc_used "rtx_def_simple_return" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SIMPLE_RETURN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 252 nil gc_used "rtx_def_return" + (!srcfileloc "rtl.h" 452) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "RETURN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 253 nil gc_used "rtx_def_call" + (!srcfileloc 
"rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CALL") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 254 nil gc_used "rtx_def_clobber" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CLOBBER") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 255 nil gc_used "rtx_def_use" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "USE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 256 nil gc_used "rtx_def_set" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SET") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 257 nil gc_used "rtx_def_prefetch" + (!srcfileloc "rtl.h" 452) + (!fields 3 + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + 
(!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PREFETCH") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 258 nil gc_used "rtx_def_addr_diff_vec" + (!srcfileloc "rtl.h" 452) + (!fields 5 + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ADDR_DIFF_VEC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 259 nil gc_used "rtx_def_addr_vec" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ADDR_VEC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 260 nil gc_used "rtx_def_unspec_volatile" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + 
) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNSPEC_VOLATILE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 261 nil gc_used "rtx_def_unspec" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNSPEC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 262 nil gc_used "rtx_def_asm_operands" + (!srcfileloc "rtl.h" 452) + (!fields 7 + (!pair ".fld[6].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ASM_OPERANDS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 263 nil gc_used "rtx_def_asm_input" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_int" + (!type already_seen 2) 
+ (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ASM_INPUT") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 264 nil gc_used "rtx_def_parallel" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "PARALLEL") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 265 nil gc_used "rtx_def_cond_exec" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "COND_EXEC") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 266 nil gc_used "rtx_def_note" + (!srcfileloc "rtl.h" 452) + (!fields 6 + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3]." 
+ (!type union 267 nil gc_used "rtx_def_note_subunion" + (!srcfileloc "rtl.h" 452) + (!fields 19 + (!pair "rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1196) + (!options + (!option default string "") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_UPDATE_SJLJ_CONTEXT") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_CFI_LABEL") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_CFI") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_SWITCH_TEXT_SECTIONS") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_BASIC_BLOCK") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_INLINE_ENTRY") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_BEGIN_STMT") + (!option dot string "") + ) + ) + (!pair "rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1205) + (!options + (!option tag string "NOTE_INSN_VAR_LOCATION") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_EH_REGION_END") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_EH_REGION_BEG") + (!option dot string "") + ) + ) + (!pair "rt_int" + 
(!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_EPILOGUE_BEG") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_PROLOGUE_END") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_FUNCTION_BEG") + (!option dot string "") + ) + ) + (!pair "rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1201) + (!options + (!option tag string "NOTE_INSN_BLOCK_END") + (!option dot string "") + ) + ) + (!pair "rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1201) + (!options + (!option tag string "NOTE_INSN_BLOCK_BEG") + (!option dot string "") + ) + ) + (!pair "rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1196) + (!options + (!option tag string "NOTE_INSN_DELETED_DEBUG_LABEL") + (!option dot string "") + ) + ) + (!pair "rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1196) + (!options + (!option tag string "NOTE_INSN_DELETED_LABEL") + (!option dot string "") + ) + ) + (!pair "rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1209) + (!options + (!option tag string "NOTE_INSN_DELETED") + (!option dot string "") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option desc string "NOTE_KIND (&%0)") + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type pointer 268 nil gc_used + (!type struct 269 + (!type already_seen 268) + gc_pointed_to "basic_block_def" + (!srcfileloc "basic-block.h" 156) + (!fields 12 + (!pair "preds" + (!type pointer 270 nil gc_used + (!type user_struct 271 + (!type already_seen 270) + gc_pointed_to "vec" + (!srcfileloc "basic-block.h" 119) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "basic-block.h" 119) + nil ) + (!pair "edge" + (!type pointer 272 nil 
gc_used + (!type user_struct 273 + (!type already_seen 272) + gc_pointed_to "edge_def" + (!srcfileloc "basic-block.h" 53) + (!fields 0 ) + ) + ) + (!srcfileloc "basic-block.h" 119) + nil ) + ) + ) + ) + (!srcfileloc "basic-block.h" 119) + nil ) + (!pair "succs" + (!type already_seen 270) + (!srcfileloc "basic-block.h" 120) + nil ) + (!pair "aux" + (!type already_seen 3) + (!srcfileloc "basic-block.h" 123) + (!options + (!option skip string "") + ) + ) + (!pair "loop_father" + (!type pointer 274 nil gc_used + (!type struct 275 + (!type already_seen 274) + gc_pointed_to "loop" + (!srcfileloc "cfgloop.h" 275) + (!fields 37 + (!pair "num" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 121) + nil ) + (!pair "ninsns" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 124) + nil ) + (!pair "header" + (!type already_seen 268) + (!srcfileloc "cfgloop.h" 127) + nil ) + (!pair "latch" + (!type already_seen 268) + (!srcfileloc "cfgloop.h" 130) + nil ) + (!pair "lpt_decision" + (!type struct 276 nil gc_used "lpt_decision" + (!srcfileloc "cfgloop.h" 37) + (!fields 2 + (!pair "decision" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 35) + nil ) + (!pair "times" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 36) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "cfgloop.h" 133) + nil ) + (!pair "av_ninsns" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 136) + nil ) + (!pair "num_nodes" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 139) + nil ) + (!pair "superloops" + (!type pointer 277 nil gc_used + (!type user_struct 278 + (!type already_seen 277) + gc_pointed_to "vec" + (!srcfileloc "cfgloop.h" 142) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cfgloop.h" 142) + nil ) + (!pair "loop_p" + (!type already_seen 274) + (!srcfileloc "cfgloop.h" 142) + nil ) + ) + ) + ) + (!srcfileloc "cfgloop.h" 142) + nil ) + (!pair "inner" + (!type already_seen 274) + (!srcfileloc "cfgloop.h" 145) + nil ) + (!pair "next" + (!type already_seen 274) + 
(!srcfileloc "cfgloop.h" 148) + nil ) + (!pair "aux" + (!type already_seen 3) + (!srcfileloc "cfgloop.h" 151) + (!options + (!option skip string "") + ) + ) + (!pair "nb_iterations" + (!type already_seen 23) + (!srcfileloc "cfgloop.h" 160) + nil ) + (!pair "nb_iterations_upper_bound" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 164) + nil ) + (!pair "nb_iterations_likely_upper_bound" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 166) + nil ) + (!pair "nb_iterations_estimate" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 171) + nil ) + (!pair "safelen" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 177) + nil ) + (!pair "simdlen" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 180) + nil ) + (!pair "constraints" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 203) + nil ) + (!pair "estimate_state" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 207) + nil ) + (!pair "any_upper_bound" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 209) + nil ) + (!pair "any_estimate" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 210) + nil ) + (!pair "any_likely_upper_bound" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 211) + nil ) + (!pair "can_be_parallel" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 214) + nil ) + (!pair "warned_aggressive_loop_optimizations" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 218) + nil ) + (!pair "dont_vectorize" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 221) + nil ) + (!pair "force_vectorize" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 224) + nil ) + (!pair "in_oacc_kernels_region" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 227) + nil ) + (!pair "finite_p" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 231) + nil ) + (!pair "unroll" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 237) + nil ) + (!pair "owned_clique" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 241) + nil ) + (!pair "simduid" + (!type already_seen 23) + (!srcfileloc 
"cfgloop.h" 246) + nil ) + (!pair "orig_loop_num" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 256) + nil ) + (!pair "bounds" + (!type pointer 279 nil gc_used + (!type struct 280 + (!type already_seen 279) + gc_pointed_to "nb_iter_bound" + (!srcfileloc "cfgloop.h" 70) + (!fields 4 + (!pair "stmt" + (!type pointer 281 nil gc_used + (!type struct 282 + (!type already_seen 281) + gc_pointed_to "gimple" + (!srcfileloc "gimple.h" 287) + (!fields 15 + (!pair "code" + (!type already_seen 2) + (!srcfileloc "gimple.h" 228) + nil ) + (!pair "no_warning" + (!type already_seen 2) + (!srcfileloc "gimple.h" 231) + nil ) + (!pair "visited" + (!type already_seen 2) + (!srcfileloc "gimple.h" 235) + nil ) + (!pair "nontemporal_move" + (!type already_seen 2) + (!srcfileloc "gimple.h" 238) + nil ) + (!pair "plf" + (!type already_seen 2) + (!srcfileloc "gimple.h" 245) + nil ) + (!pair "modified" + (!type already_seen 2) + (!srcfileloc "gimple.h" 249) + nil ) + (!pair "has_volatile_ops" + (!type already_seen 2) + (!srcfileloc "gimple.h" 252) + nil ) + (!pair "pad" + (!type already_seen 2) + (!srcfileloc "gimple.h" 255) + nil ) + (!pair "subcode" + (!type already_seen 2) + (!srcfileloc "gimple.h" 261) + nil ) + (!pair "uid" + (!type already_seen 2) + (!srcfileloc "gimple.h" 266) + nil ) + (!pair "location" + (!type already_seen 2) + (!srcfileloc "gimple.h" 270) + nil ) + (!pair "num_ops" + (!type already_seen 2) + (!srcfileloc "gimple.h" 273) + nil ) + (!pair "bb" + (!type already_seen 268) + (!srcfileloc "gimple.h" 277) + nil ) + (!pair "next" + (!type already_seen 281) + (!srcfileloc "gimple.h" 285) + nil ) + (!pair "prev" + (!type already_seen 281) + (!srcfileloc "gimple.h" 286) + (!options + (!option skip string "") + ) + ) + ) + (!options + (!option variable_size string "") + (!option chain_next string "%h.next") + (!option tag string "GSS_BASE") + (!option desc string "gimple_statement_structure (&%h)") + ) + 1023 nil nil ) + ) + (!srcfileloc "cfgloop.h" 52) + nil ) + (!pair 
"bound" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 61) + nil ) + (!pair "is_exit" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 66) + nil ) + (!pair "next" + (!type already_seen 279) + (!srcfileloc "cfgloop.h" 69) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "cfgloop.h" 259) + nil ) + (!pair "control_ivs" + (!type pointer 283 nil gc_used + (!type struct 284 + (!type already_seen 283) + gc_pointed_to "control_iv" + (!srcfileloc "cfgloop.h" 114) + (!fields 3 + (!pair "base" + (!type already_seen 23) + (!srcfileloc "cfgloop.h" 111) + nil ) + (!pair "step" + (!type already_seen 23) + (!srcfileloc "cfgloop.h" 112) + nil ) + (!pair "next" + (!type already_seen 283) + (!srcfileloc "cfgloop.h" 113) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "cfgloop.h" 262) + nil ) + (!pair "exits" + (!type pointer 285 nil gc_used + (!type struct 286 + (!type already_seen 285) + gc_pointed_to "loop_exit" + (!srcfileloc "cfgloop.h" 84) + (!fields 4 + (!pair "e" + (!type already_seen 272) + (!srcfileloc "cfgloop.h" 76) + nil ) + (!pair "prev" + (!type already_seen 285) + (!srcfileloc "cfgloop.h" 79) + nil ) + (!pair "next" + (!type already_seen 285) + (!srcfileloc "cfgloop.h" 80) + nil ) + (!pair "next_e" + (!type already_seen 285) + (!srcfileloc "cfgloop.h" 83) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "cfgloop.h" 265) + nil ) + (!pair "simple_loop_desc" + (!type pointer 287 nil gc_used + (!type struct 288 + (!type already_seen 287) + gc_pointed_to "niter_desc" + (!srcfileloc "cfgloop.h" 497) + (!fields 11 + (!pair "out_edge" + (!type already_seen 272) + (!srcfileloc "cfgloop.h" 464) + nil ) + (!pair "in_edge" + (!type already_seen 272) + (!srcfileloc "cfgloop.h" 467) + nil ) + (!pair "simple_p" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 471) + nil ) + (!pair "const_iter" + (!type already_seen 2) 
+ (!srcfileloc "cfgloop.h" 474) + nil ) + (!pair "niter" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 477) + nil ) + (!pair "assumptions" + (!type already_seen 99) + (!srcfileloc "cfgloop.h" 480) + nil ) + (!pair "noloop_assumptions" + (!type already_seen 99) + (!srcfileloc "cfgloop.h" 484) + nil ) + (!pair "infinite" + (!type already_seen 99) + (!srcfileloc "cfgloop.h" 487) + nil ) + (!pair "signed_p" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 490) + nil ) + (!pair "mode" + (!type struct 289 nil gc_used "scalar_int_mode" + (!srcfileloc "coretypes.h" 66) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "cfgloop.h" 493) + nil ) + (!pair "niter_expr" + (!type already_seen 99) + (!srcfileloc "cfgloop.h" 496) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "cfgloop.h" 268) + nil ) + (!pair "former_header" + (!type already_seen 268) + (!srcfileloc "cfgloop.h" 274) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "basic-block.h" 126) + nil ) + (!pair "dom" + (!type array 290 nil gc_unused "2" + (!type pointer 291 nil gc_unused + (!type struct 292 + (!type already_seen 291) + gc_unused "et_node"nil + (!fields 0 ) + nil 0 nil nil ) + ) + ) + (!srcfileloc "basic-block.h" 129) + (!options + (!option skip string "") + ) + ) + (!pair "prev_bb" + (!type already_seen 268) + (!srcfileloc "basic-block.h" 132) + nil ) + (!pair "next_bb" + (!type already_seen 268) + (!srcfileloc "basic-block.h" 133) + nil ) + (!pair "il" + (!type union 293 nil gc_used "basic_block_il_dependent" + (!srcfileloc "basic-block.h" 141) + (!fields 2 + (!pair "gimple" + (!type struct 294 nil gc_used "gimple_bb_info" + (!srcfileloc "basic-block.h" 89) + (!fields 2 + (!pair "seq" + (!type already_seen 281) + (!srcfileloc "basic-block.h" 85) + nil ) + (!pair "phi_nodes" + (!type already_seen 281) + (!srcfileloc "basic-block.h" 88) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "basic-block.h" 136) + (!options + (!option tag string 
"0") + ) + ) + (!pair "x" + (!type struct 295 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/basic-block.h:137" + (!srcfileloc "basic-block.h" 140) + (!fields 2 + (!pair "head_" + (!type pointer 296 nil gc_used + (!type struct 297 + (!type already_seen 296) + gc_pointed_to "rtx_insn" + (!srcfileloc "rtl.h" 574) + (!fields 0 ) + nil 1023 nil + (!type already_seen 101) + ) + ) + (!srcfileloc "basic-block.h" 138) + nil ) + (!pair "rtl" + (!type pointer 298 nil gc_used + (!type struct 299 + (!type already_seen 298) + gc_pointed_to "rtl_bb_info" + (!srcfileloc "basic-block.h" 81) + (!fields 3 + (!pair "end_" + (!type already_seen 296) + (!srcfileloc "basic-block.h" 75) + nil ) + (!pair "header_" + (!type already_seen 296) + (!srcfileloc "basic-block.h" 79) + nil ) + (!pair "footer_" + (!type already_seen 296) + (!srcfileloc "basic-block.h" 80) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "basic-block.h" 139) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "basic-block.h" 140) + (!options + (!option tag string "1") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "basic-block.h" 141) + (!options + (!option desc string "((%1.flags & BB_RTL) != 0)") + ) + ) + (!pair "flags" + (!type already_seen 2) + (!srcfileloc "basic-block.h" 144) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "basic-block.h" 147) + nil ) + (!pair "count" + (!type struct 300 nil gc_used "profile_count" + (!srcfileloc "profile-count.h" 1249) + (!fields 2 + (!pair "UINT64_BIT_FIELD_ALIGN" + (!type already_seen 2) + (!srcfileloc "profile-count.h" 714) + nil ) + (!pair "m_quality" + (!type already_seen 2) + (!srcfileloc "profile-count.h" 716) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "basic-block.h" 150) + nil ) + (!pair "discriminator" + (!type already_seen 2) + (!srcfileloc "basic-block.h" 155) + nil ) + ) + (!options + (!option chain_prev string "%h.prev_bb") + (!option chain_next string "%h.next_bb") + ) + 1023 nil nil ) + ) + (!srcfileloc 
"gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "NOTE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 301 nil gc_used "rtx_def_code_label" + (!srcfileloc "rtl.h" 452) + (!fields 7 + (!pair ".fld[6].rt_str" + (!type already_seen 11) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type already_seen 268) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CODE_LABEL") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 302 nil gc_used "rtx_def_barrier" + (!srcfileloc "rtl.h" 452) + (!fields 7 + (!pair ".fld[6].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string 
"") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "BARRIER") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 303 nil gc_used "rtx_def_jump_table_data" + (!srcfileloc "rtl.h" 452) + (!fields 8 + (!pair ".fld[7].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[6].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type already_seen 268) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 
nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "JUMP_TABLE_DATA") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 304 nil gc_used "rtx_def_call_insn" + (!srcfileloc "rtl.h" 452) + (!fields 8 + (!pair ".fld[7].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[6].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type already_seen 268) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "CALL_INSN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 305 nil gc_used "rtx_def_jump_insn" + (!srcfileloc "rtl.h" 452) + (!fields 8 + (!pair ".fld[7].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[6].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + 
(!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type already_seen 268) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "JUMP_INSN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 306 nil gc_used "rtx_def_insn" + (!srcfileloc "rtl.h" 452) + (!fields 7 + (!pair ".fld[6].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type already_seen 268) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "INSN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 307 nil gc_used "rtx_def_debug_insn" + (!srcfileloc 
"rtl.h" 452) + (!fields 7 + (!pair ".fld[6].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[5].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[4].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[3].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[2].rt_bb" + (!type already_seen 268) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "DEBUG_INSN") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 308 nil gc_used "rtx_def_address" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "ADDRESS") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 309 nil gc_used "rtx_def_sequence" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_rtvec" + (!type already_seen 230) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "SEQUENCE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 310 nil gc_used "rtx_def_int_list" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type 
already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "INT_LIST") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 311 nil gc_used "rtx_def_insn_list" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "INSN_LIST") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 312 nil gc_used "rtx_def_expr_list" + (!srcfileloc "rtl.h" 452) + (!fields 2 + (!pair ".fld[1].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + (!pair ".fld[0].rt_rtx" + (!type already_seen 99) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "EXPR_LIST") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 313 nil gc_used "rtx_def_debug_expr" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_tree" + (!type already_seen 23) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "DEBUG_EXPR") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 314 nil gc_used "rtx_def_value" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string 
"") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "VALUE") + (!option dot string "") + ) + ) + (!pair "" + (!type struct 315 nil gc_used "rtx_def_UnKnown" + (!srcfileloc "rtl.h" 452) + (!fields 1 + (!pair ".fld[0].rt_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 1345) + (!options + (!option dot string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gengtype.cc" 1380) + (!options + (!option tag string "UNKNOWN") + (!option dot string "") + ) + ) + ) + (!options + (!option dot string "") + ) + 1023 nil ) + (!srcfileloc "rtl.h" 452) + (!options + (!option desc string "GET_CODE (&%0)") + (!option special string "rtx_def") + ) + ) + ) + (!options + (!option chain_prev string "RTX_PREV (&%h)") + (!option chain_next string "RTX_NEXT (&%h)") + (!option tag string "0") + (!option desc string "0") + ) + 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 1832) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2056) + (!options + (!option tag string "TS_DECL_WRTL") + ) + ) + (!pair "decl_non_common" + (!type struct 316 nil gc_used "tree_decl_non_common" + (!srcfileloc "tree-core.h" 1909) + (!fields 2 + (!pair "common" + (!type struct 317 nil gc_used "tree_decl_with_vis" + (!srcfileloc "tree-core.h" 1899) + (!fields 20 + (!pair "common" + (!type already_seen 98) + (!srcfileloc "tree-core.h" 1865) + nil ) + (!pair "assembler_name" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1866) + nil ) + (!pair "symtab_node" + (!type pointer 318 nil gc_used + (!type struct 319 + (!type already_seen 318) + gc_pointed_to "symtab_node" + (!srcfileloc "cgraph.h" 676) + (!fields 42 + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 162) + nil ) + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 165) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 485) + nil ) + (!pair "resolution" + (!type already_seen 2) + (!srcfileloc 
"cgraph.h" 488) + nil ) + (!pair "definition" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 494) + nil ) + (!pair "alias" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 497) + nil ) + (!pair "transparent_alias" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 519) + nil ) + (!pair "weakref" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 521) + nil ) + (!pair "cpp_implicit_alias" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 528) + nil ) + (!pair "symver" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 530) + nil ) + (!pair "analyzed" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 533) + nil ) + (!pair "writeonly" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 535) + nil ) + (!pair "refuse_visibility_changes" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 538) + nil ) + (!pair "externally_visible" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 543) + nil ) + (!pair "no_reorder" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 545) + nil ) + (!pair "force_output" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 548) + nil ) + (!pair "forced_by_abi" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 552) + nil ) + (!pair "unique_name" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 554) + nil ) + (!pair "implicit_section" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 557) + nil ) + (!pair "body_removed" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 560) + nil ) + (!pair "semantic_interposition" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 562) + nil ) + (!pair "used_from_other_partition" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 569) + nil ) + (!pair "in_other_partition" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 573) + nil ) + (!pair "address_taken" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 580) + nil ) + (!pair "in_init_priority_hash" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 582) + nil ) + (!pair "need_lto_streaming" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 
586) + nil ) + (!pair "offloadable" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 589) + nil ) + (!pair "ifunc_resolver" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 592) + nil ) + (!pair "order" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 596) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 599) + nil ) + (!pair "next" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 602) + nil ) + (!pair "previous" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 603) + nil ) + (!pair "next_sharing_asm_name" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 614) + nil ) + (!pair "previous_sharing_asm_name" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 615) + nil ) + (!pair "same_comdat_group" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 618) + nil ) + (!pair "ref_list" + (!type struct 320 nil gc_unused "ipa_ref_list" + (!srcfileloc "cgraph.h" 621) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "cgraph.h" 621) + (!options + (!option skip string "") + ) + ) + (!pair "alias_target" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 626) + nil ) + (!pair "lto_file_data" + (!type pointer 321 + (!type pointer 322 nil gc_used + (!type already_seen 321) + ) + gc_pointed_to + (!type struct 323 + (!type already_seen 321) + gc_pointed_to "lto_file_decl_data" + (!srcfileloc "lto-streamer.h" 605) + (!fields 18 + (!pair "current_decl_state" + (!type pointer 324 nil gc_used + (!type struct 325 + (!type already_seen 324) + gc_pointed_to "lto_in_decl_state" + (!srcfileloc "lto-streamer.h" 500) + (!fields 3 + (!pair "streams" + (!type array 326 nil gc_used "LTO_N_DECL_STREAMS" + (!type already_seen 84) + ) + (!srcfileloc "lto-streamer.h" 492) + nil ) + (!pair "fn_decl" + (!type already_seen 23) + (!srcfileloc "lto-streamer.h" 496) + nil ) + (!pair "compressed" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 499) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc 
"lto-streamer.h" 557) + nil ) + (!pair "global_decl_state" + (!type already_seen 324) + (!srcfileloc "lto-streamer.h" 561) + nil ) + (!pair "symtab_node_encoder" + (!type pointer 327 nil gc_unused + (!type struct 328 + (!type already_seen 327) + gc_unused "lto_symtab_encoder_d"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "lto-streamer.h" 564) + (!options + (!option skip string "") + ) + ) + (!pair "function_decl_states" + (!type pointer 329 nil gc_used + (!type user_struct 330 + (!type already_seen 329) + gc_pointed_to "hash_table" + (!srcfileloc "lto-streamer.h" 567) + (!fields 1 + (!pair "decl_state_hasher" + (!type struct 331 nil gc_used "decl_state_hasher" + (!srcfileloc "lto-streamer.h" 567) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "lto-streamer.h" 567) + nil ) + ) + ) + ) + (!srcfileloc "lto-streamer.h" 567) + nil ) + (!pair "file_name" + (!type already_seen 11) + (!srcfileloc "lto-streamer.h" 570) + (!options + (!option skip string "") + ) + ) + (!pair "section_hash_table" + (!type pointer 332 nil gc_unused + (!type struct 333 + (!type already_seen 332) + gc_unused "htab"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "lto-streamer.h" 573) + (!options + (!option skip string "") + ) + ) + (!pair "renaming_hash_table" + (!type already_seen 332) + (!srcfileloc "lto-streamer.h" 576) + (!options + (!option skip string "") + ) + ) + (!pair "next" + (!type already_seen 321) + (!srcfileloc "lto-streamer.h" 579) + nil ) + (!pair "order" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 582) + nil ) + (!pair "id" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 585) + nil ) + (!pair "respairs" + (!type user_struct 334 nil gc_unused "vec" + (!srcfileloc "lto-streamer.h" 588) + (!fields 1 + (!pair "res_pair" + (!type struct 335 nil gc_unused "res_pair" + (!srcfileloc "lto-streamer.h" 588) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "lto-streamer.h" 588) + nil ) + ) + ) + (!srcfileloc "lto-streamer.h" 588) + (!options + 
(!option skip string "") + ) + ) + (!pair "max_index" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 589) + nil ) + (!pair "profile_info" + (!type undefined 336 nil gc_unused "gcov_summary" + (!srcfileloc "lto-streamer.h" 591) + ) + (!srcfileloc "lto-streamer.h" 591) + (!options + (!option skip string "") + ) + ) + (!pair "resolution_map" + (!type pointer 337 nil gc_unused + (!type user_struct 338 + (!type already_seen 337) + gc_unused "hash_map" + (!srcfileloc "lto-streamer.h" 594) + (!fields 2 + (!pair "ld_plugin_symbol_resolution" + (!type undefined 339 nil gc_unused "ld_plugin_symbol_resolution" + (!srcfileloc "lto-streamer.h" 594) + ) + (!srcfileloc "lto-streamer.h" 594) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "lto-streamer.h" 594) + nil ) + ) + ) + ) + (!srcfileloc "lto-streamer.h" 594) + (!options + (!option skip string "") + ) + ) + (!pair "mode_table" + (!type already_seen 11) + (!srcfileloc "lto-streamer.h" 597) + nil ) + (!pair "lto_section_header" + (!type struct 340 nil gc_used "lto_section" + (!srcfileloc "lto-streamer.h" 600) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "lto-streamer.h" 600) + nil ) + (!pair "order_base" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 602) + nil ) + (!pair "unit_base" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 604) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 629) + nil ) + (!pair "aux" + (!type already_seen 3) + (!srcfileloc "cgraph.h" 631) + (!options + (!option skip string "") + ) + ) + (!pair "x_comdat_group" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 634) + nil ) + (!pair "x_section" + (!type pointer 341 nil gc_used + (!type struct 342 + (!type already_seen 341) + gc_pointed_to "section_hash_entry" + (!srcfileloc "cgraph.h" 637) + (!fields 2 + (!pair "ref_count" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 54) + nil ) + (!pair "name" + (!type already_seen 11) + (!srcfileloc "cgraph.h" 55) + nil ) + ) + (!options + 
(!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 637) + nil ) + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 644) + nil ) + ) + (!options + (!option chain_prev string "%h.previous") + (!option chain_next string "%h.next") + (!option tag string "SYMTAB_SYMBOL") + (!option desc string "%h.type") + ) + 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 1867) + nil ) + (!pair "defer_output" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1870) + nil ) + (!pair "hard_register" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1871) + nil ) + (!pair "common_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1872) + nil ) + (!pair "in_text_section" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1873) + nil ) + (!pair "in_constant_pool" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1874) + nil ) + (!pair "dllimport_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1875) + nil ) + (!pair "weak_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1877) + nil ) + (!pair "seen_in_bind_expr" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1879) + nil ) + (!pair "comdat_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1880) + nil ) + (!pair "visibility" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1882) + nil ) + (!pair "visibility_specified" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1883) + nil ) + (!pair "init_priority_p" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1886) + nil ) + (!pair "shadowed_for_var_p" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1888) + nil ) + (!pair "cxx_constructor" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1890) + nil ) + (!pair "cxx_destructor" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1892) + nil ) + (!pair "final" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1894) + nil ) + (!pair "regdecl_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 
1896) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1906) + nil ) + (!pair "result" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1908) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2058) + (!options + (!option tag string "TS_DECL_NON_COMMON") + ) + ) + (!pair "parm_decl" + (!type struct 343 nil gc_used "tree_parm_decl" + (!srcfileloc "tree-core.h" 1862) + (!fields 2 + (!pair "common" + (!type already_seen 98) + (!srcfileloc "tree-core.h" 1860) + nil ) + (!pair "incoming_rtl" + (!type already_seen 99) + (!srcfileloc "tree-core.h" 1861) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2059) + (!options + (!option tag string "TS_PARM_DECL") + ) + ) + (!pair "decl_with_vis" + (!type already_seen 317) + (!srcfileloc "tree-core.h" 2060) + (!options + (!option tag string "TS_DECL_WITH_VIS") + ) + ) + (!pair "var_decl" + (!type struct 344 nil gc_used "tree_var_decl" + (!srcfileloc "tree-core.h" 1903) + (!fields 1 + (!pair "common" + (!type already_seen 317) + (!srcfileloc "tree-core.h" 1902) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2061) + (!options + (!option tag string "TS_VAR_DECL") + ) + ) + (!pair "field_decl" + (!type struct 345 nil gc_used "tree_field_decl" + (!srcfileloc "tree-core.h" 1843) + (!fields 6 + (!pair "common" + (!type already_seen 53) + (!srcfileloc "tree-core.h" 1836) + nil ) + (!pair "offset" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1838) + nil ) + (!pair "bit_field_type" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1839) + nil ) + (!pair "qualifier" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1840) + nil ) + (!pair "bit_offset" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1841) + nil ) + (!pair "fcontext" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1842) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2062) + (!options + (!option tag string "TS_FIELD_DECL") + ) + ) + (!pair "label_decl" + (!type struct 346 
nil gc_used "tree_label_decl" + (!srcfileloc "tree-core.h" 1849) + (!fields 3 + (!pair "common" + (!type already_seen 98) + (!srcfileloc "tree-core.h" 1846) + nil ) + (!pair "label_decl_uid" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1847) + nil ) + (!pair "eh_landing_pad_nr" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1848) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2063) + (!options + (!option tag string "TS_LABEL_DECL") + ) + ) + (!pair "result_decl" + (!type struct 347 nil gc_used "tree_result_decl" + (!srcfileloc "tree-core.h" 1853) + (!fields 1 + (!pair "common" + (!type already_seen 98) + (!srcfileloc "tree-core.h" 1852) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2064) + (!options + (!option tag string "TS_RESULT_DECL") + ) + ) + (!pair "const_decl" + (!type struct 348 nil gc_used "tree_const_decl" + (!srcfileloc "tree-core.h" 1857) + (!fields 1 + (!pair "common" + (!type already_seen 53) + (!srcfileloc "tree-core.h" 1856) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2065) + (!options + (!option tag string "TS_CONST_DECL") + ) + ) + (!pair "type_decl" + (!type struct 349 nil gc_used "tree_type_decl" + (!srcfileloc "tree-core.h" 1988) + (!fields 1 + (!pair "common" + (!type already_seen 316) + (!srcfileloc "tree-core.h" 1986) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2066) + (!options + (!option tag string "TS_TYPE_DECL") + ) + ) + (!pair "function_decl" + (!type struct 350 nil gc_used "tree_function_decl" + (!srcfileloc "tree-core.h" 1975) + (!fields 28 + (!pair "common" + (!type already_seen 316) + (!srcfileloc "tree-core.h" 1929) + nil ) + (!pair "f" + (!type pointer 351 nil gc_used + (!type struct 352 + (!type already_seen 351) + gc_pointed_to "function" + (!srcfileloc "function.h" 441) + (!fields 51 + (!pair "eh" + (!type pointer 353 nil gc_used + (!type struct 354 + (!type already_seen 353) + gc_pointed_to "eh_status" + (!srcfileloc "except.h" 218) + (!fields 
6 + (!pair "region_tree" + (!type pointer 355 nil gc_used + (!type struct 356 + (!type already_seen 355) + gc_pointed_to "eh_region_d" + (!srcfileloc "except.h" 180) + (!fields 10 + (!pair "outer" + (!type already_seen 355) + (!srcfileloc "except.h" 121) + nil ) + (!pair "inner" + (!type already_seen 355) + (!srcfileloc "except.h" 124) + nil ) + (!pair "next_peer" + (!type already_seen 355) + (!srcfileloc "except.h" 125) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "except.h" 128) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "except.h" 131) + nil ) + (!pair "u" + (!type union 357 nil gc_used "eh_region_u" + (!srcfileloc "except.h" 167) + (!fields 3 + (!pair "eh_try" + (!type struct 358 nil gc_used "eh_region_u_try" + (!srcfileloc "except.h" 139) + (!fields 2 + (!pair "first_catch" + (!type pointer 359 nil gc_used + (!type struct 360 + (!type already_seen 359) + gc_pointed_to "eh_catch_d" + (!srcfileloc "except.h" 114) + (!fields 5 + (!pair "next_catch" + (!type already_seen 359) + (!srcfileloc "except.h" 98) + nil ) + (!pair "prev_catch" + (!type already_seen 359) + (!srcfileloc "except.h" 99) + nil ) + (!pair "type_list" + (!type already_seen 23) + (!srcfileloc "except.h" 103) + nil ) + (!pair "filter_list" + (!type already_seen 23) + (!srcfileloc "except.h" 108) + nil ) + (!pair "label" + (!type already_seen 23) + (!srcfileloc "except.h" 113) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "except.h" 137) + nil ) + (!pair "last_catch" + (!type already_seen 359) + (!srcfileloc "except.h" 138) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "except.h" 139) + (!options + (!option tag string "ERT_TRY") + ) + ) + (!pair "allowed" + (!type struct 361 nil gc_used "eh_region_u_allowed" + (!srcfileloc "except.h" 153) + (!fields 3 + (!pair "type_list" + (!type already_seen 23) + (!srcfileloc "except.h" 143) + nil ) + (!pair "label" + (!type already_seen 23) + (!srcfileloc "except.h" 147) + nil ) + (!pair "filter" + (!type 
already_seen 2) + (!srcfileloc "except.h" 152) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "except.h" 153) + (!options + (!option tag string "ERT_ALLOWED_EXCEPTIONS") + ) + ) + (!pair "must_not_throw" + (!type struct 362 nil gc_used "eh_region_u_must_not_throw" + (!srcfileloc "except.h" 166) + (!fields 2 + (!pair "failure_decl" + (!type already_seen 23) + (!srcfileloc "except.h" 163) + nil ) + (!pair "failure_loc" + (!type already_seen 2) + (!srcfileloc "except.h" 165) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "except.h" 166) + (!options + (!option tag string "ERT_MUST_NOT_THROW") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "except.h" 167) + (!options + (!option desc string "%0.type") + ) + ) + (!pair "landing_pads" + (!type pointer 363 nil gc_used + (!type struct 364 + (!type already_seen 363) + gc_pointed_to "eh_landing_pad_d" + (!srcfileloc "except.h" 91) + (!fields 5 + (!pair "next_lp" + (!type already_seen 363) + (!srcfileloc "except.h" 72) + nil ) + (!pair "region" + (!type already_seen 355) + (!srcfileloc "except.h" 75) + nil ) + (!pair "post_landing_pad" + (!type already_seen 23) + (!srcfileloc "except.h" 80) + nil ) + (!pair "landing_pad" + (!type pointer 365 nil gc_used + (!type struct 366 + (!type already_seen 365) + gc_pointed_to "rtx_code_label" + (!srcfileloc "function.h" 140) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + ) + (!srcfileloc "except.h" 87) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "except.h" 90) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "except.h" 170) + nil ) + (!pair "exc_ptr_reg" + (!type already_seen 99) + (!srcfileloc "except.h" 175) + nil ) + (!pair "filter_reg" + (!type already_seen 99) + (!srcfileloc "except.h" 175) + nil ) + (!pair "use_cxa_end_cleanup" + (!type already_seen 2) + (!srcfileloc "except.h" 179) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "except.h" 194) + nil ) + (!pair "region_array" + (!type pointer 367 nil gc_used + (!type user_struct 368 + 
(!type already_seen 367) + gc_pointed_to "vec" + (!srcfileloc "except.h" 197) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "except.h" 197) + nil ) + (!pair "eh_region" + (!type already_seen 355) + (!srcfileloc "except.h" 197) + nil ) + ) + ) + ) + (!srcfileloc "except.h" 197) + nil ) + (!pair "lp_array" + (!type pointer 369 nil gc_used + (!type user_struct 370 + (!type already_seen 369) + gc_pointed_to "vec" + (!srcfileloc "except.h" 200) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "except.h" 200) + nil ) + (!pair "eh_landing_pad" + (!type already_seen 363) + (!srcfileloc "except.h" 200) + nil ) + ) + ) + ) + (!srcfileloc "except.h" 200) + nil ) + (!pair "throw_stmt_table" + (!type pointer 371 nil gc_used + (!type user_struct 372 + (!type already_seen 371) + gc_pointed_to "hash_map" + (!srcfileloc "except.h" 204) + (!fields 2 + (!pair "int" + (!type undefined 373 nil gc_unused "int" + (!srcfileloc "coretypes.h" 363) + ) + (!srcfileloc "except.h" 204) + nil ) + (!pair "gimple" + (!type already_seen 281) + (!srcfileloc "except.h" 204) + nil ) + ) + ) + ) + (!srcfileloc "except.h" 204) + nil ) + (!pair "ttype_data" + (!type already_seen 84) + (!srcfileloc "except.h" 208) + nil ) + (!pair "ehspec_data" + (!type union 374 nil gc_used "eh_status_u" + (!srcfileloc "except.h" 217) + (!fields 2 + (!pair "arm_eabi" + (!type already_seen 84) + (!srcfileloc "except.h" 215) + (!options + (!option tag string "1") + ) + ) + (!pair "other" + (!type pointer 375 nil gc_used + (!type user_struct 376 + (!type already_seen 375) + gc_pointed_to "vec" + (!srcfileloc "function.h" 145) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "function.h" 145) + nil ) + (!pair "uchar" + (!type already_seen 8) + (!srcfileloc "function.h" 145) + nil ) + ) + ) + ) + (!srcfileloc "except.h" 216) + (!options + (!option tag string "0") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "except.h" 217) + (!options + (!option desc string 
"targetm.arm_eabi_unwinder") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 250) + nil ) + (!pair "cfg" + (!type pointer 377 nil gc_used + (!type struct 378 + (!type already_seen 377) + gc_pointed_to "control_flow_graph" + (!srcfileloc "cfg.h" 81) + (!fields 15 + (!pair "x_entry_block_ptr" + (!type already_seen 268) + (!srcfileloc "cfg.h" 41) + nil ) + (!pair "x_exit_block_ptr" + (!type already_seen 268) + (!srcfileloc "cfg.h" 42) + nil ) + (!pair "x_basic_block_info" + (!type pointer 379 nil gc_used + (!type user_struct 380 + (!type already_seen 379) + gc_pointed_to "vec" + (!srcfileloc "cfg.h" 45) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cfg.h" 45) + nil ) + (!pair "basic_block" + (!type already_seen 268) + (!srcfileloc "cfg.h" 45) + nil ) + ) + ) + ) + (!srcfileloc "cfg.h" 45) + nil ) + (!pair "x_n_basic_blocks" + (!type already_seen 2) + (!srcfileloc "cfg.h" 48) + nil ) + (!pair "x_n_edges" + (!type already_seen 2) + (!srcfileloc "cfg.h" 51) + nil ) + (!pair "x_last_basic_block" + (!type already_seen 2) + (!srcfileloc "cfg.h" 54) + nil ) + (!pair "last_label_uid" + (!type already_seen 2) + (!srcfileloc "cfg.h" 57) + nil ) + (!pair "x_label_to_block_map" + (!type already_seen 379) + (!srcfileloc "cfg.h" 61) + nil ) + (!pair "x_profile_status" + (!type already_seen 2) + (!srcfileloc "cfg.h" 63) + nil ) + (!pair "x_dom_computed" + (!type array 381 nil gc_used "2" + (!type already_seen 2) + ) + (!srcfileloc "cfg.h" 66) + nil ) + (!pair "x_n_bbs_in_dom_tree" + (!type array 382 nil gc_used "2" + (!type already_seen 2) + ) + (!srcfileloc "cfg.h" 69) + nil ) + (!pair "max_jumptable_ents" + (!type already_seen 2) + (!srcfileloc "cfg.h" 73) + nil ) + (!pair "count_max" + (!type already_seen 300) + (!srcfileloc "cfg.h" 76) + nil ) + (!pair "edge_flags_allocated" + (!type already_seen 2) + (!srcfileloc "cfg.h" 79) + nil ) + (!pair "bb_flags_allocated" + (!type already_seen 2) + (!srcfileloc "cfg.h" 80) + nil ) + ) + nil 1023 
nil nil ) + ) + (!srcfileloc "function.h" 253) + nil ) + (!pair "gimple_body" + (!type already_seen 281) + (!srcfileloc "function.h" 256) + nil ) + (!pair "gimple_df" + (!type pointer 383 nil gc_used + (!type struct 384 + (!type already_seen 383) + gc_pointed_to "gimple_df" + (!srcfileloc "gimple-ssa.h" 115) + (!fields 13 + (!pair "ssa_names" + (!type already_seen 84) + (!srcfileloc "gimple-ssa.h" 74) + nil ) + (!pair "vop" + (!type already_seen 23) + (!srcfileloc "gimple-ssa.h" 77) + nil ) + (!pair "escaped" + (!type struct 385 nil gc_used "pt_solution" + (!srcfileloc "tree-ssa-alias.h" 65) + (!fields 11 + (!pair "anything" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 32) + nil ) + (!pair "nonlocal" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 36) + nil ) + (!pair "escaped" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 40) + nil ) + (!pair "ipa_escaped" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 44) + nil ) + (!pair "null" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 48) + nil ) + (!pair "vars_contains_nonlocal" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 51) + nil ) + (!pair "vars_contains_escaped" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 53) + nil ) + (!pair "vars_contains_escaped_heap" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 56) + nil ) + (!pair "vars_contains_restrict" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 59) + nil ) + (!pair "vars_contains_interposable" + (!type already_seen 2) + (!srcfileloc "tree-ssa-alias.h" 61) + nil ) + (!pair "vars" + (!type pointer 386 nil gc_used + (!type struct 387 + (!type already_seen 386) + gc_pointed_to "bitmap_head" + (!srcfileloc "bitmap.h" 361) + (!fields 7 + (!pair "indx" + (!type already_seen 2) + (!srcfileloc "bitmap.h" 335) + nil ) + (!pair "tree_form" + (!type already_seen 2) + (!srcfileloc "bitmap.h" 338) + nil ) + (!pair "padding" + (!type already_seen 2) + (!srcfileloc 
"bitmap.h" 340) + nil ) + (!pair "alloc_descriptor" + (!type already_seen 2) + (!srcfileloc "bitmap.h" 342) + nil ) + (!pair "first" + (!type pointer 388 nil gc_used + (!type struct 389 + (!type already_seen 388) + gc_pointed_to "bitmap_element" + (!srcfileloc "bitmap.h" 345) + (!fields 4 + (!pair "next" + (!type already_seen 388) + (!srcfileloc "bitmap.h" 313) + nil ) + (!pair "prev" + (!type already_seen 388) + (!srcfileloc "bitmap.h" 316) + nil ) + (!pair "indx" + (!type already_seen 2) + (!srcfileloc "bitmap.h" 318) + nil ) + (!pair "bits" + (!type array 390 nil gc_used "BITMAP_ELEMENT_WORDS" + (!type already_seen 2) + ) + (!srcfileloc "bitmap.h" 320) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "bitmap.h" 345) + nil ) + (!pair "current" + (!type already_seen 388) + (!srcfileloc "bitmap.h" 347) + (!options + (!option skip string "") + ) + ) + (!pair "obstack" + (!type pointer 391 nil gc_unused + (!type struct 392 + (!type already_seen 391) + gc_unused "bitmap_obstack" + (!srcfileloc "bitmap.h" 349) + (!fields 0 ) + nil 1023 nil nil ) + ) + (!srcfileloc "bitmap.h" 349) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-ssa-alias.h" 64) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "gimple-ssa.h" 80) + nil ) + (!pair "decls_to_pointers" + (!type pointer 393 nil gc_used + (!type user_struct 394 + (!type already_seen 393) + gc_pointed_to "hash_map" + (!srcfileloc "gimple-ssa.h" 84) + (!fields 2 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "gimple-ssa.h" 84) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "gimple-ssa.h" 84) + nil ) + ) + ) + ) + (!srcfileloc "gimple-ssa.h" 84) + (!options + (!option skip string "") + ) + ) + (!pair "free_ssanames" + (!type already_seen 84) + (!srcfileloc "gimple-ssa.h" 87) + nil ) + (!pair "free_ssanames_queue" + (!type already_seen 84) + (!srcfileloc "gimple-ssa.h" 90) + nil ) + (!pair 
"default_defs" + (!type pointer 395 nil gc_used + (!type user_struct 396 + (!type already_seen 395) + gc_pointed_to "hash_table" + (!srcfileloc "gimple-ssa.h" 96) + (!fields 1 + (!pair "ssa_name_hasher" + (!type struct 397 nil gc_used "ssa_name_hasher" + (!srcfileloc "gimple-ssa.h" 96) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gimple-ssa.h" 96) + nil ) + ) + ) + ) + (!srcfileloc "gimple-ssa.h" 96) + nil ) + (!pair "ssa_renaming_needed" + (!type already_seen 2) + (!srcfileloc "gimple-ssa.h" 99) + nil ) + (!pair "rename_vops" + (!type already_seen 2) + (!srcfileloc "gimple-ssa.h" 102) + nil ) + (!pair "in_ssa_p" + (!type already_seen 2) + (!srcfileloc "gimple-ssa.h" 105) + nil ) + (!pair "ipa_pta" + (!type already_seen 2) + (!srcfileloc "gimple-ssa.h" 108) + nil ) + (!pair "ssa_operands" + (!type struct 398 nil gc_used "ssa_operands" + (!srcfileloc "tree-ssa-operands.h" 63) + (!fields 5 + (!pair "operand_memory" + (!type pointer 399 nil gc_used + (!type struct 400 + (!type already_seen 399) + gc_pointed_to "ssa_operand_memory_d" + (!srcfileloc "tree-ssa-operands.h" 51) + (!fields 2 + (!pair "next" + (!type already_seen 399) + (!srcfileloc "tree-ssa-operands.h" 49) + nil ) + (!pair "mem" + (!type array 401 nil gc_used "1" + (!type already_seen 8) + ) + (!srcfileloc "tree-ssa-operands.h" 50) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "tree-ssa-operands.h" 55) + nil ) + (!pair "operand_memory_index" + (!type already_seen 2) + (!srcfileloc "tree-ssa-operands.h" 56) + nil ) + (!pair "ssa_operand_mem_size" + (!type already_seen 2) + (!srcfileloc "tree-ssa-operands.h" 58) + nil ) + (!pair "ops_active" + (!type already_seen 2) + (!srcfileloc "tree-ssa-operands.h" 60) + nil ) + (!pair "free_uses" + (!type pointer 402 nil gc_unused + (!type struct 403 + (!type already_seen 402) + gc_unused "use_optype_d"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "tree-ssa-operands.h" 62) + (!options + (!option 
skip string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "gimple-ssa.h" 110) + nil ) + (!pair "tm_restart" + (!type pointer 404 nil gc_used + (!type user_struct 405 + (!type already_seen 404) + gc_pointed_to "hash_table" + (!srcfileloc "gimple-ssa.h" 114) + (!fields 1 + (!pair "tm_restart_hasher" + (!type struct 406 nil gc_used "tm_restart_hasher" + (!srcfileloc "gimple-ssa.h" 114) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "gimple-ssa.h" 114) + nil ) + ) + ) + ) + (!srcfileloc "gimple-ssa.h" 114) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 259) + nil ) + (!pair "x_current_loops" + (!type pointer 407 nil gc_used + (!type struct 408 + (!type already_seen 407) + gc_pointed_to "loops" + (!srcfileloc "cfgloop.h" 335) + (!fields 4 + (!pair "state" + (!type already_seen 2) + (!srcfileloc "cfgloop.h" 323) + nil ) + (!pair "larray" + (!type already_seen 277) + (!srcfileloc "cfgloop.h" 326) + nil ) + (!pair "exits" + (!type pointer 409 nil gc_used + (!type user_struct 410 + (!type already_seen 409) + gc_pointed_to "hash_table" + (!srcfileloc "cfgloop.h" 331) + (!fields 1 + (!pair "loop_exit_hasher" + (!type struct 411 nil gc_used "loop_exit_hasher" + (!srcfileloc "cfgloop.h" 331) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "cfgloop.h" 331) + nil ) + ) + ) + ) + (!srcfileloc "cfgloop.h" 331) + nil ) + (!pair "tree_root" + (!type already_seen 274) + (!srcfileloc "cfgloop.h" 334) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 262) + nil ) + (!pair "pass_startwith" + (!type already_seen 11) + (!srcfileloc "function.h" 265) + nil ) + (!pair "su" + (!type pointer 412 nil gc_used + (!type struct 413 + (!type already_seen 412) + gc_pointed_to "stack_usage" + (!srcfileloc "function.h" 235) + (!fields 6 + (!pair "static_stack_size" + (!type already_seen 2) + (!srcfileloc "function.h" 214) + nil ) + (!pair "dynamic_stack_size" + (!type already_seen 2) + (!srcfileloc "function.h" 218) + nil ) + (!pair "pushed_stack_size" + 
(!type already_seen 2) + (!srcfileloc "function.h" 223) + nil ) + (!pair "has_unbounded_dynamic_stack_size" + (!type already_seen 2) + (!srcfileloc "function.h" 227) + nil ) + (!pair "callees" + (!type pointer 414 nil gc_used + (!type user_struct 415 + (!type already_seen 414) + gc_pointed_to "vec" + (!srcfileloc "function.h" 230) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "function.h" 230) + nil ) + (!pair "callinfo_callee" + (!type struct 416 nil gc_used "callinfo_callee" + (!srcfileloc "function.h" 230) + (!fields 2 + (!pair "location" + (!type already_seen 2) + (!srcfileloc "function.h" 199) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "function.h" 200) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "function.h" 230) + nil ) + ) + ) + ) + (!srcfileloc "function.h" 230) + nil ) + (!pair "dallocs" + (!type pointer 417 nil gc_used + (!type user_struct 418 + (!type already_seen 417) + gc_pointed_to "vec" + (!srcfileloc "function.h" 234) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "function.h" 234) + nil ) + (!pair "callinfo_dalloc" + (!type struct 419 nil gc_used "callinfo_dalloc" + (!srcfileloc "function.h" 234) + (!fields 2 + (!pair "location" + (!type already_seen 2) + (!srcfileloc "function.h" 206) + nil ) + (!pair "name" + (!type already_seen 11) + (!srcfileloc "function.h" 207) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "function.h" 234) + nil ) + ) + ) + ) + (!srcfileloc "function.h" 234) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 268) + nil ) + (!pair "value_histograms" + (!type already_seen 332) + (!srcfileloc "function.h" 271) + (!options + (!option skip string "") + ) + ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "function.h" 276) + nil ) + (!pair "static_chain_decl" + (!type already_seen 23) + (!srcfileloc "function.h" 280) + nil ) + (!pair "nonlocal_goto_save_area" + (!type already_seen 23) + (!srcfileloc "function.h" 285) + nil ) + 
(!pair "local_decls" + (!type already_seen 84) + (!srcfileloc "function.h" 288) + nil ) + (!pair "machine" + (!type pointer 420 nil gc_unused + (!type struct 421 + (!type already_seen 420) + gc_maybe_pointed_to "machine_function" + (!srcfileloc "config/i386/i386.h" 2749) + (!fields 31 + (!pair "stack_locals" + (!type pointer 422 nil gc_used + (!type struct 423 + (!type already_seen 422) + gc_pointed_to "stack_local_entry" + (!srcfileloc "config/i386/i386.cc" 339) + (!fields 4 + (!pair "mode" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.cc" 335) + nil ) + (!pair "n" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.cc" 336) + nil ) + (!pair "rtl" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.cc" 337) + nil ) + (!pair "next" + (!type already_seen 422) + (!srcfileloc "config/i386/i386.cc" 338) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "config/i386/i386.h" 2635) + nil ) + (!pair "varargs_gpr_size" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2636) + nil ) + (!pair "varargs_fpr_size" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2637) + nil ) + (!pair "optimize_mode_switching" + (!type array 424 nil gc_used "MAX_386_ENTITIES" + (!type already_seen 2) + ) + (!srcfileloc "config/i386/i386.h" 2638) + nil ) + (!pair "frame" + (!type struct 425 nil gc_used "ix86_frame" + (!srcfileloc "config/i386/i386.h" 2553) + (!fields 16 + (!pair "nsseregs" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2529) + nil ) + (!pair "nregs" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2530) + nil ) + (!pair "va_arg_size" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2531) + nil ) + (!pair "red_zone_size" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2532) + nil ) + (!pair "outgoing_arguments_size" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2533) + nil ) + (!pair "frame_pointer_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 
2536) + nil ) + (!pair "hard_frame_pointer_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2537) + nil ) + (!pair "stack_pointer_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2538) + nil ) + (!pair "hfp_save_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2539) + nil ) + (!pair "reg_save_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2540) + nil ) + (!pair "stack_realign_allocate" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2541) + nil ) + (!pair "stack_realign_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2542) + nil ) + (!pair "sse_reg_save_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2543) + nil ) + (!pair "save_regs_using_mov" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2547) + nil ) + (!pair "expensive_p" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2551) + nil ) + (!pair "expensive_count" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2552) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "config/i386/i386.h" 2641) + nil ) + (!pair "split_stack_varargs_pointer" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.h" 2648) + nil ) + (!pair "call_abi" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2652) + nil ) + (!pair "accesses_prev_frame" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2655) + nil ) + (!pair "use_fast_prologue_epilogue" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2659) + nil ) + (!pair "pc_thunk_call_expanded" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2663) + nil ) + (!pair "tls_descriptor_call_expanded_p" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2674) + nil ) + (!pair "static_chain_on_stack" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2678) + nil ) + (!pair "no_drap_save_restore" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 
2681) + nil ) + (!pair "func_type" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2684) + nil ) + (!pair "indirect_branch_type" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2687) + nil ) + (!pair "has_local_indirect_jump" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2691) + nil ) + (!pair "function_return_type" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2694) + nil ) + (!pair "no_caller_saved_registers" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2698) + nil ) + (!pair "arg_reg_available" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2705) + nil ) + (!pair "call_ms2sysv" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2709) + nil ) + (!pair "call_ms2sysv_pad_in" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2713) + nil ) + (!pair "call_ms2sysv_extra_regs" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2719) + nil ) + (!pair "outgoing_args_on_stack" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2722) + nil ) + (!pair "insn_queued_at_entrance" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2725) + nil ) + (!pair "function_label_emitted" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2728) + nil ) + (!pair "stack_frame_required" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2731) + nil ) + (!pair "silent_p" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2735) + nil ) + (!pair "red_zone_used" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2738) + nil ) + (!pair "max_used_stack_alignment" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2741) + nil ) + (!pair "fs" + (!type struct 426 nil gc_used "machine_frame_state" + (!srcfileloc "config/i386/i386.h" 2609) + (!fields 12 + (!pair "cfa_reg" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.h" 2564) + nil ) + (!pair "cfa_offset" + (!type already_seen 2) + (!srcfileloc 
"config/i386/i386.h" 2565) + nil ) + (!pair "sp_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2571) + nil ) + (!pair "fp_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2572) + nil ) + (!pair "red_zone_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2578) + nil ) + (!pair "sp_valid" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2584) + nil ) + (!pair "fp_valid" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2585) + nil ) + (!pair "drap_valid" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2586) + nil ) + (!pair "realigned" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2591) + nil ) + (!pair "sp_realigned" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2598) + nil ) + (!pair "sp_realigned_fp_last" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2602) + nil ) + (!pair "sp_realigned_offset" + (!type already_seen 2) + (!srcfileloc "config/i386/i386.h" 2608) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "config/i386/i386.h" 2745) + nil ) + (!pair "seh" + (!type pointer 427 nil gc_unused + (!type struct 428 + (!type already_seen 427) + gc_unused "seh_frame_state"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "config/i386/i386.h" 2748) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 293) + (!options + (!option maybe_undef string "") + ) + ) + (!pair "language" + (!type pointer 429 nil gc_used + (!type lang_struct 430 + (!type already_seen 429) + gc_pointed_to "language_function" + (!srcfileloc "ada/gcc-interface/trans.cc" 121) + (!fields 0 ) + nil 1023 + (!homotypes 8 + (!type struct 431 nil gc_pointed_to "language_function" + (!srcfileloc "lto/lto-tree.h" 42) + (!fields 1 + (!pair "dummy" + (!type already_seen 2) + (!srcfileloc "lto/lto-tree.h" 41) + nil ) + ) + nil 128 + (!type already_seen 430) + nil ) + + (!type struct 432 nil gc_pointed_to 
"language_function" + (!srcfileloc "jit/dummy-frontend.cc" 523) + (!fields 1 + (!pair "dummy" + (!type already_seen 2) + (!srcfileloc "jit/dummy-frontend.cc" 522) + nil ) + ) + nil 64 + (!type already_seen 430) + nil ) + + (!type struct 433 nil gc_pointed_to "language_function" + (!srcfileloc "go/go-lang.cc" 84) + (!fields 1 + (!pair "dummy" + (!type already_seen 2) + (!srcfileloc "go/go-lang.cc" 83) + nil ) + ) + nil 32 + (!type already_seen 430) + nil ) + + (!type struct 434 nil gc_pointed_to "language_function" + (!srcfileloc "fortran/f95-lang.cc" 68) + (!fields 1 + (!pair "binding_level" + (!type pointer 435 nil gc_used + (!type lang_struct 436 + (!type already_seen 435) + gc_pointed_to "binding_level" + (!srcfileloc "d/d-tree.h" 124) + (!fields 0 ) + (!options + (!option chain_next string "%h.level_chain") + ) + 24 + (!homotypes 2 + (!type struct 437 nil gc_pointed_to "binding_level" + (!srcfileloc "fortran/f95-lang.cc" 310) + (!fields 4 + (!pair "names" + (!type already_seen 23) + (!srcfileloc "fortran/f95-lang.cc" 301) + nil ) + (!pair "blocks" + (!type already_seen 23) + (!srcfileloc "fortran/f95-lang.cc" 304) + nil ) + (!pair "level_chain" + (!type already_seen 435) + (!srcfileloc "fortran/f95-lang.cc" 306) + nil ) + (!pair "reversed" + (!type already_seen 2) + (!srcfileloc "fortran/f95-lang.cc" 309) + nil ) + ) + nil 16 + (!type already_seen 436) + nil ) + + (!type struct 438 nil gc_pointed_to "binding_level" + (!srcfileloc "d/d-tree.h" 124) + (!fields 4 + (!pair "names" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 113) + nil ) + (!pair "blocks" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 117) + nil ) + (!pair "level_chain" + (!type already_seen 435) + (!srcfileloc "d/d-tree.h" 120) + nil ) + (!pair "kind" + (!type already_seen 2) + (!srcfileloc "d/d-tree.h" 123) + nil ) + ) + (!options + (!option chain_next string "%h.level_chain") + ) + 8 + (!type already_seen 436) + nil ) + ) + ) + ) + (!srcfileloc "fortran/f95-lang.cc" 67) + nil ) + 
) + nil 16 + (!type already_seen 430) + nil ) + + (!type struct 439 nil gc_pointed_to "language_function" + (!srcfileloc "d/d-tree.h" 251) + (!fields 6 + (!pair "function" + (!type pointer 440 nil gc_unused + (!type struct 441 + (!type already_seen 440) + gc_unused "FuncDeclaration" + (!srcfileloc "d/d-tree.h" 236) + (!fields 0 ) + nil 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 236) + (!options + (!option skip string "") + ) + ) + (!pair "module" + (!type pointer 442 nil gc_unused + (!type struct 443 + (!type already_seen 442) + gc_unused "Module" + (!srcfileloc "d/d-tree.h" 237) + (!fields 0 ) + nil 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 237) + (!options + (!option skip string "") + ) + ) + (!pair "static_chain" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 240) + nil ) + (!pair "stmt_list" + (!type already_seen 84) + (!srcfileloc "d/d-tree.h" 244) + nil ) + (!pair "vars_in_scope" + (!type already_seen 84) + (!srcfileloc "d/d-tree.h" 247) + nil ) + (!pair "labels" + (!type pointer 444 nil gc_used + (!type user_struct 445 + (!type already_seen 444) + gc_pointed_to "hash_map" + (!srcfileloc "d/d-tree.h" 250) + (!fields 2 + (!pair "d_label_entry" + (!type struct 446 nil gc_used "d_label_entry" + (!srcfileloc "d/d-tree.h" 250) + (!fields 7 + (!pair "label" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 153) + nil ) + (!pair "statement" + (!type pointer 447 nil gc_used + (!type struct 448 + (!type already_seen 447) + gc_pointed_to "Statement" + (!srcfileloc "d/d-tree.h" 138) + (!fields 0 ) + nil 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 156) + (!options + (!option skip string "") + ) + ) + (!pair "level" + (!type already_seen 435) + (!srcfileloc "d/d-tree.h" 161) + nil ) + (!pair "fwdrefs" + (!type pointer 449 nil gc_used + (!type struct 450 + (!type already_seen 449) + gc_pointed_to "d_label_use_entry" + (!srcfileloc "d/d-tree.h" 144) + (!fields 3 + (!pair "next" + (!type already_seen 449) + (!srcfileloc "d/d-tree.h" 135) + nil ) + (!pair 
"statement" + (!type already_seen 447) + (!srcfileloc "d/d-tree.h" 138) + (!options + (!option skip string "") + ) + ) + (!pair "level" + (!type already_seen 435) + (!srcfileloc "d/d-tree.h" 143) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 164) + nil ) + (!pair "in_try_scope" + (!type already_seen 2) + (!srcfileloc "d/d-tree.h" 169) + nil ) + (!pair "in_catch_scope" + (!type already_seen 2) + (!srcfileloc "d/d-tree.h" 170) + nil ) + (!pair "bc_label" + (!type already_seen 2) + (!srcfileloc "d/d-tree.h" 173) + nil ) + ) + nil 8 nil nil ) + (!srcfileloc "d/d-tree.h" 250) + nil ) + (!pair "Statement" + (!type already_seen 447) + (!srcfileloc "d/d-tree.h" 250) + nil ) + ) + ) + ) + (!srcfileloc "d/d-tree.h" 250) + nil ) + ) + nil 8 + (!type already_seen 430) + nil ) + + (!type struct 451 nil gc_pointed_to "language_function" + (!srcfileloc "cp/cp-tree.h" 2088) + (!fields 20 + (!pair "base" + (!type struct 452 nil gc_used "c_language_function" + (!srcfileloc "c-family/c-common.h" 601) + (!fields 2 + (!pair "x_stmt_tree" + (!type struct 453 + (!type pointer 454 nil gc_unused + (!type already_seen 453) + ) + gc_used "stmt_tree_s" + (!srcfileloc "c-family/c-common.h" 586) + (!fields 2 + (!pair "x_cur_stmt_list" + (!type already_seen 84) + (!srcfileloc "c-family/c-common.h" 574) + nil ) + (!pair "stmts_are_full_exprs_p" + (!type already_seen 2) + (!srcfileloc "c-family/c-common.h" 585) + nil ) + ) + nil 774 nil nil ) + (!srcfileloc "c-family/c-common.h" 596) + nil ) + (!pair "local_typedefs" + (!type already_seen 84) + (!srcfileloc "c-family/c-common.h" 600) + nil ) + ) + nil 774 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2058) + nil ) + (!pair "x_cdtor_label" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2060) + nil ) + (!pair "x_current_class_ptr" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2061) + nil ) + (!pair "x_current_class_ref" + (!type already_seen 23) + (!srcfileloc 
"cp/cp-tree.h" 2062) + nil ) + (!pair "x_eh_spec_block" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2063) + nil ) + (!pair "x_in_charge_parm" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2064) + nil ) + (!pair "x_vtt_parm" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2065) + nil ) + (!pair "x_return_value" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2066) + nil ) + (!pair "returns_value" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2068) + nil ) + (!pair "returns_null" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2069) + nil ) + (!pair "returns_abnormally" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2070) + nil ) + (!pair "infinite_loop" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2071) + nil ) + (!pair "x_in_function_try_handler" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2072) + nil ) + (!pair "x_in_base_initializer" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2073) + nil ) + (!pair "can_throw" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2076) + nil ) + (!pair "invalid_constexpr" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2078) + nil ) + (!pair "throwing_cleanup" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2079) + nil ) + (!pair "x_named_labels" + (!type pointer 455 nil gc_used + (!type user_struct 456 + (!type already_seen 455) + gc_pointed_to "hash_table" + (!srcfileloc "cp/cp-tree.h" 2081) + (!fields 1 + (!pair "named_label_hash" + (!type struct 457 nil gc_used "named_label_hash" + (!srcfileloc "cp/cp-tree.h" 2081) + (!fields 0 ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2081) + nil ) + ) + ) + ) + (!srcfileloc "cp/cp-tree.h" 2081) + nil ) + (!pair "bindings" + (!type already_seen 82) + (!srcfileloc "cp/cp-tree.h" 2083) + nil ) + (!pair "infinite_loops" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 2087) + nil ) + ) + nil 516 + (!type already_seen 430) + nil ) + + (!type struct 458 nil gc_pointed_to 
"language_function" + (!srcfileloc "c/c-lang.h" 61) + (!fields 8 + (!pair "base" + (!type already_seen 452) + (!srcfileloc "c/c-lang.h" 53) + nil ) + (!pair "x_in_statement" + (!type already_seen 8) + (!srcfileloc "c/c-lang.h" 54) + nil ) + (!pair "x_switch_stack" + (!type pointer 459 nil gc_unused + (!type struct 460 + (!type already_seen 459) + gc_unused "c_switch"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "c/c-lang.h" 55) + (!options + (!option skip string "") + ) + ) + (!pair "arg_info" + (!type pointer 461 nil gc_unused + (!type struct 462 + (!type already_seen 461) + gc_unused "c_arg_info"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "c/c-lang.h" 56) + (!options + (!option skip string "") + ) + ) + (!pair "returns_value" + (!type already_seen 2) + (!srcfileloc "c/c-lang.h" 57) + nil ) + (!pair "returns_null" + (!type already_seen 2) + (!srcfileloc "c/c-lang.h" 58) + nil ) + (!pair "returns_abnormally" + (!type already_seen 2) + (!srcfileloc "c/c-lang.h" 59) + nil ) + (!pair "warn_about_return_type" + (!type already_seen 2) + (!srcfileloc "c/c-lang.h" 60) + nil ) + ) + nil 258 + (!type already_seen 430) + nil ) + + (!type struct 463 nil gc_pointed_to "language_function" + (!srcfileloc "ada/gcc-interface/trans.cc" 121) + (!fields 4 + (!pair "parm_attr_cache" + (!type pointer 464 nil gc_used + (!type user_struct 465 + (!type already_seen 464) + gc_pointed_to "vec" + (!srcfileloc "ada/gcc-interface/trans.cc" 117) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ada/gcc-interface/trans.cc" 117) + nil ) + (!pair "parm_attr" + (!type pointer 466 nil gc_used + (!type struct 467 + (!type already_seen 466) + gc_pointed_to "parm_attr_d" + (!srcfileloc "ada/gcc-interface/trans.cc" 111) + (!fields 5 + (!pair "id" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 106) + nil ) + (!pair "dim" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 107) + nil ) + (!pair "first" + (!type already_seen 
23) + (!srcfileloc "ada/gcc-interface/trans.cc" 108) + nil ) + (!pair "last" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 109) + nil ) + (!pair "length" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 110) + nil ) + ) + nil 1 nil nil ) + ) + (!srcfileloc "ada/gcc-interface/trans.cc" 117) + nil ) + ) + ) + ) + (!srcfileloc "ada/gcc-interface/trans.cc" 117) + nil ) + (!pair "named_ret_val" + (!type already_seen 386) + (!srcfileloc "ada/gcc-interface/trans.cc" 118) + nil ) + (!pair "other_ret_val" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/trans.cc" 119) + nil ) + (!pair "gnat_ret" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 120) + nil ) + ) + nil 1 + (!type already_seen 430) + nil ) + ) + ) + ) + (!srcfileloc "function.h" 296) + nil ) + (!pair "used_types_hash" + (!type pointer 468 nil gc_used + (!type user_struct 469 + (!type already_seen 468) + gc_pointed_to "hash_set" + (!srcfileloc "function.h" 299) + (!fields 1 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "function.h" 299) + nil ) + ) + ) + ) + (!srcfileloc "function.h" 299) + nil ) + (!pair "fde" + (!type pointer 470 nil gc_used + (!type struct 471 + (!type already_seen 470) + gc_pointed_to "dw_fde_node" + (!srcfileloc "dwarf2out.h" 119) + (!fields 24 + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "dwarf2out.h" 79) + nil ) + (!pair "dw_fde_begin" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 80) + nil ) + (!pair "dw_fde_current_label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 81) + nil ) + (!pair "dw_fde_end" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 82) + nil ) + (!pair "dw_fde_vms_end_prologue" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 83) + nil ) + (!pair "dw_fde_vms_begin_epilogue" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 84) + nil ) + (!pair "dw_fde_second_begin" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 85) + nil ) + 
(!pair "dw_fde_second_end" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 86) + nil ) + (!pair "dw_fde_cfi" + (!type pointer 472 nil gc_used + (!type user_struct 473 + (!type already_seen 472) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.h" 68) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.h" 68) + nil ) + (!pair "dw_cfi_ref" + (!type pointer 474 nil gc_used + (!type struct 475 + (!type already_seen 474) + gc_pointed_to "dw_cfi_node" + (!srcfileloc "dwarf2out.h" 65) + (!fields 3 + (!pair "dw_cfi_opc" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 60) + nil ) + (!pair "dw_cfi_oprnd1" + (!type union 476 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/dwarf2out.h:50" + (!srcfileloc "dwarf2out.h" 57) + (!fields 5 + (!pair "dw_cfi_reg_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 51) + (!options + (!option tag string "dw_cfi_oprnd_reg_num") + ) + ) + (!pair "dw_cfi_offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 52) + (!options + (!option tag string "dw_cfi_oprnd_offset") + ) + ) + (!pair "dw_cfi_addr" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 53) + (!options + (!option tag string "dw_cfi_oprnd_addr") + ) + ) + (!pair "dw_cfi_loc" + (!type pointer 477 nil gc_used + (!type struct 478 + (!type already_seen 477) + gc_pointed_to "dw_loc_descr_node" + (!srcfileloc "dwarf2out.h" 299) + (!fields 7 + (!pair "dw_loc_next" + (!type already_seen 477) + (!srcfileloc "dwarf2out.h" 287) + nil ) + (!pair "dw_loc_opc" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 288) + nil ) + (!pair "dtprel" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 291) + nil ) + (!pair "frame_offset_rel" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 295) + nil ) + (!pair "dw_loc_addr" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 296) + nil ) + (!pair "dw_loc_oprnd1" + (!type struct 479 + (!type pointer 480 nil gc_unused + (!type already_seen 479) + ) + 
gc_used "dw_val_node" + (!srcfileloc "dwarf2out.h" 297) + (!fields 3 + (!pair "val_class" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 242) + nil ) + (!pair "val_entry" + (!type pointer 481 nil gc_used + (!type struct 482 + (!type already_seen 481) + gc_pointed_to "addr_table_entry" + (!srcfileloc "dwarf2out.cc" 1364) + (!fields 4 + (!pair "kind" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1345) + nil ) + (!pair "refcount" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1346) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1347) + nil ) + (!pair "addr" + (!type union 483 nil gc_used "addr_table_entry_struct_union" + (!srcfileloc "dwarf2out.cc" 1352) + (!fields 2 + (!pair "rtl" + (!type already_seen 99) + (!srcfileloc "dwarf2out.cc" 1350) + (!options + (!option tag string "0") + ) + ) + (!pair "label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 1351) + (!options + (!option tag string "1") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "dwarf2out.cc" 1353) + (!options + (!option desc string "%1.kind") + ) + ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 243) + nil ) + (!pair "v" + (!type union 484 nil gc_used "dw_val_struct_union" + (!srcfileloc "dwarf2out.h" 279) + (!fields 23 + (!pair "val_addr" + (!type already_seen 99) + (!srcfileloc "dwarf2out.h" 246) + (!options + (!option tag string "dw_val_class_addr") + ) + ) + (!pair "val_offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 247) + (!options + (!option tag string "dw_val_class_offset") + ) + ) + (!pair "val_loc_list" + (!type pointer 485 nil gc_used + (!type struct 486 + (!type already_seen 485) + gc_pointed_to "dw_loc_list_struct" + (!srcfileloc "dwarf2out.cc" 1391) + (!fields 19 + (!pair "dw_loc_next" + (!type already_seen 485) + (!srcfileloc "dwarf2out.cc" 1362) + nil ) + (!pair "begin" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 1363) + nil ) + (!pair 
"begin_entry" + (!type already_seen 481) + (!srcfileloc "dwarf2out.cc" 1364) + nil ) + (!pair "end" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 1365) + nil ) + (!pair "end_entry" + (!type already_seen 481) + (!srcfileloc "dwarf2out.cc" 1366) + nil ) + (!pair "ll_symbol" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 1367) + nil ) + (!pair "vl_symbol" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 1369) + nil ) + (!pair "section" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 1370) + nil ) + (!pair "expr" + (!type already_seen 477) + (!srcfileloc "dwarf2out.cc" 1371) + nil ) + (!pair "vbegin" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1372) + nil ) + (!pair "vend" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1372) + nil ) + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1373) + nil ) + (!pair "resolved_addr" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1376) + nil ) + (!pair "replaced" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1378) + nil ) + (!pair "emitted" + (!type already_seen 8) + (!srcfileloc "dwarf2out.cc" 1381) + nil ) + (!pair "num_assigned" + (!type already_seen 8) + (!srcfileloc "dwarf2out.cc" 1383) + nil ) + (!pair "offset_emitted" + (!type already_seen 8) + (!srcfileloc "dwarf2out.cc" 1385) + nil ) + (!pair "noted_variable_value" + (!type already_seen 8) + (!srcfileloc "dwarf2out.cc" 1387) + nil ) + (!pair "force" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1390) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 248) + (!options + (!option tag string "dw_val_class_loc_list") + ) + ) + (!pair "val_view_list" + (!type pointer 487 nil gc_used + (!type struct 488 + (!type already_seen 487) + gc_pointed_to "die_struct" + (!srcfileloc "dwarf2out.cc" 3197) + (!fields 15 + (!pair "die_id" + (!type union 489 nil gc_used "die_symbol_or_type_node" + (!srcfileloc "dwarf2out.cc" 3174) + (!fields 2 + (!pair "die_symbol" + (!type 
already_seen 11) + (!srcfileloc "dwarf2out.cc" 3172) + (!options + (!option tag string "0") + ) + ) + (!pair "die_type_node" + (!type pointer 490 nil gc_used + (!type struct 491 + (!type already_seen 490) + gc_pointed_to "comdat_type_node" + (!srcfileloc "dwarf2out.cc" 3273) + (!fields 5 + (!pair "root_die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3268) + nil ) + (!pair "type_die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3269) + nil ) + (!pair "skeleton_die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3270) + nil ) + (!pair "signature" + (!type array 492 nil gc_used "DWARF_TYPE_SIGNATURE_SIZE" + (!type already_seen 8) + ) + (!srcfileloc "dwarf2out.cc" 3271) + nil ) + (!pair "next" + (!type already_seen 490) + (!srcfileloc "dwarf2out.cc" 3272) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.cc" 3173) + (!options + (!option tag string "1") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "dwarf2out.cc" 3175) + (!options + (!option desc string "%0.comdat_type_p") + ) + ) + (!pair "die_attr" + (!type pointer 493 nil gc_used + (!type user_struct 494 + (!type already_seen 493) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3176) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3176) + nil ) + (!pair "dw_attr_node" + (!type struct 495 nil gc_used "dw_attr_struct" + (!srcfileloc "dwarf2out.h" 434) + (!fields 2 + (!pair "dw_attr" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 432) + nil ) + (!pair "dw_attr_val" + (!type already_seen 479) + (!srcfileloc "dwarf2out.h" 433) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.cc" 3176) + nil ) + ) + ) + ) + (!srcfileloc "dwarf2out.cc" 3176) + nil ) + (!pair "die_parent" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3177) + nil ) + (!pair "die_child" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3178) + nil ) + (!pair "die_sib" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3179) + nil ) 
+ (!pair "die_definition" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3180) + nil ) + (!pair "die_offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3181) + nil ) + (!pair "die_abbrev" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3182) + nil ) + (!pair "die_mark" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3183) + nil ) + (!pair "decl_id" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3184) + nil ) + (!pair "die_tag" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3185) + nil ) + (!pair "die_perennial_p" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3187) + nil ) + (!pair "comdat_type_p" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3188) + nil ) + (!pair "with_offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3191) + nil ) + (!pair "removed" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3195) + nil ) + ) + (!options + (!option for_user string "") + (!option chain_circular string "%h.die_sib") + ) + 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 249) + (!options + (!option tag string "dw_val_class_view_list") + ) + ) + (!pair "val_loc" + (!type already_seen 477) + (!srcfileloc "dwarf2out.h" 250) + (!options + (!option tag string "dw_val_class_loc") + ) + ) + (!pair "val_int" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 251) + (!options + (!option default string "") + ) + ) + (!pair "val_unsigned" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 253) + (!options + (!option tag string "dw_val_class_unsigned_const") + ) + ) + (!pair "val_double" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 254) + (!options + (!option tag string "dw_val_class_const_double") + ) + ) + (!pair "val_wide" + (!type pointer 496 nil gc_used + (!type user_struct 497 + (!type already_seen 496) + gc_pointed_to "generic_wide_int" + (!srcfileloc "wide-int.h" 322) + (!fields 1 + (!pair "wide_int_storage" + (!type struct 498 nil gc_used "wide_int_storage" + (!srcfileloc 
"wide-int.h" 1088) + (!fields 3 + (!pair "val" + (!type array 499 nil gc_used "WIDE_INT_MAX_ELTS" + (!type already_seen 2) + ) + (!srcfileloc "wide-int.h" 1062) + nil ) + (!pair "len" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 1063) + nil ) + (!pair "precision" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 1064) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "wide-int.h" 322) + nil ) + ) + ) + ) + (!srcfileloc "dwarf2out.h" 255) + (!options + (!option tag string "dw_val_class_wide_int") + ) + ) + (!pair "val_vec" + (!type struct 500 nil gc_used "dw_vec_const" + (!srcfileloc "dwarf2out.h" 256) + (!fields 3 + (!pair "array" + (!type already_seen 3) + (!srcfileloc "dwarf2out.h" 212) + (!options + (!option atomic string "") + ) + ) + (!pair "length" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 213) + nil ) + (!pair "elt_size" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 214) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.h" 256) + (!options + (!option tag string "dw_val_class_vec") + ) + ) + (!pair "val_die_ref" + (!type struct 501 nil gc_used "dw_val_die_union" + (!srcfileloc "dwarf2out.h" 261) + (!fields 2 + (!pair "die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.h" 259) + nil ) + (!pair "external" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 260) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.h" 261) + (!options + (!option tag string "dw_val_class_die_ref") + ) + ) + (!pair "val_fde_index" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 262) + (!options + (!option tag string "dw_val_class_fde_ref") + ) + ) + (!pair "val_str" + (!type pointer 502 nil gc_used + (!type struct 503 + (!type already_seen 502) + gc_pointed_to "indirect_string_node" + (!srcfileloc "dwarf2out.cc" 223) + (!fields 5 + (!pair "str" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 218) + nil ) + (!pair "refcount" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 219) + nil ) + (!pair "form" + (!type 
already_seen 2) + (!srcfileloc "dwarf2out.cc" 220) + nil ) + (!pair "label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 221) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 222) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 263) + (!options + (!option tag string "dw_val_class_str") + ) + ) + (!pair "val_lbl_id" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 264) + (!options + (!option tag string "dw_val_class_lbl_id") + ) + ) + (!pair "val_flag" + (!type already_seen 8) + (!srcfileloc "dwarf2out.h" 265) + (!options + (!option tag string "dw_val_class_flag") + ) + ) + (!pair "val_file" + (!type pointer 504 nil gc_used + (!type struct 505 + (!type already_seen 504) + gc_pointed_to "dwarf_file_data" + (!srcfileloc "dwarf2out.h" 465) + (!fields 3 + (!pair "key" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 462) + nil ) + (!pair "filename" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 463) + nil ) + (!pair "emitted_number" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 464) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 266) + (!options + (!option tag string "dw_val_class_file") + ) + ) + (!pair "val_file_implicit" + (!type already_seen 504) + (!srcfileloc "dwarf2out.h" 268) + (!options + (!option tag string "dw_val_class_file_implicit") + ) + ) + (!pair "val_data8" + (!type array 506 nil gc_used "8" + (!type already_seen 8) + ) + (!srcfileloc "dwarf2out.h" 269) + (!options + (!option tag string "dw_val_class_data8") + ) + ) + (!pair "val_decl_ref" + (!type already_seen 23) + (!srcfileloc "dwarf2out.h" 270) + (!options + (!option tag string "dw_val_class_decl_ref") + ) + ) + (!pair "val_vms_delta" + (!type struct 507 nil gc_used "dw_val_vms_delta_union" + (!srcfileloc "dwarf2out.h" 275) + (!fields 2 + (!pair "lbl1" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 
273) + nil ) + (!pair "lbl2" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 274) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.h" 275) + (!options + (!option tag string "dw_val_class_vms_delta") + ) + ) + (!pair "val_discr_value" + (!type struct 508 nil gc_used "dw_discr_value" + (!srcfileloc "dwarf2out.h" 276) + (!fields 2 + (!pair "pos" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 227) + nil ) + (!pair "v" + (!type union 509 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/dwarf2out.h:229" + (!srcfileloc "dwarf2out.h" 232) + (!fields 2 + (!pair "sval" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 230) + (!options + (!option tag string "0") + ) + ) + (!pair "uval" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 231) + (!options + (!option tag string "1") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "dwarf2out.h" 233) + (!options + (!option desc string "%1.pos") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.h" 276) + (!options + (!option tag string "dw_val_class_discr_value") + ) + ) + (!pair "val_discr_list" + (!type pointer 510 nil gc_used + (!type struct 511 + (!type already_seen 510) + gc_pointed_to "dw_discr_list_node" + (!srcfileloc "dwarf2out.h" 314) + (!fields 4 + (!pair "dw_discr_next" + (!type already_seen 510) + (!srcfileloc "dwarf2out.h" 306) + nil ) + (!pair "dw_discr_lower_bound" + (!type already_seen 508) + (!srcfileloc "dwarf2out.h" 308) + nil ) + (!pair "dw_discr_upper_bound" + (!type already_seen 508) + (!srcfileloc "dwarf2out.h" 309) + nil ) + (!pair "dw_discr_range" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 313) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 277) + (!options + (!option tag string "dw_val_class_discr_list") + ) + ) + (!pair "val_symbolic_view" + (!type already_seen 11) + (!srcfileloc "dwarf2out.h" 278) + (!options + (!option tag string "dw_val_class_symview") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "dwarf2out.h" 
280) + (!options + (!option desc string "%1.val_class") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.h" 297) + nil ) + (!pair "dw_loc_oprnd2" + (!type already_seen 479) + (!srcfileloc "dwarf2out.h" 298) + nil ) + ) + (!options + (!option chain_next string "%h.dw_loc_next") + ) + 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 54) + (!options + (!option tag string "dw_cfi_oprnd_loc") + ) + ) + (!pair "dw_cfi_cfa_loc" + (!type pointer 512 nil gc_used + (!type struct 513 + (!type already_seen 512) + gc_pointed_to "dw_cfa_location" + (!srcfileloc "dwarf2cfi.cc" 66) + (!fields 5 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 161) + nil ) + (!pair "base_offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 162) + nil ) + (!pair "reg" + (!type struct 514 nil gc_used "cfa_reg" + (!srcfileloc "dwarf2out.h" 153) + (!fields 3 + (!pair "reg" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 129) + nil ) + (!pair "span" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 130) + nil ) + (!pair "span_width" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 131) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "dwarf2out.h" 164) + nil ) + (!pair "indirect" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 165) + nil ) + (!pair "in_use" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 166) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 56) + (!options + (!option tag string "dw_cfi_oprnd_cfa_loc") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "dwarf2out.h" 62) + (!options + (!option desc string "dw_cfi_oprnd1_desc (%1.dw_cfi_opc)") + ) + ) + (!pair "dw_cfi_oprnd2" + (!type already_seen 476) + (!srcfileloc "dwarf2out.h" 64) + (!options + (!option desc string "dw_cfi_oprnd2_desc (%1.dw_cfi_opc)") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "dwarf2out.h" 68) + nil ) + ) + ) + ) + (!srcfileloc "dwarf2out.h" 87) + nil ) + (!pair "dw_fde_switch_cfi_index" + (!type already_seen 2) + (!srcfileloc 
"dwarf2out.h" 88) + nil ) + (!pair "stack_realignment" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 89) + nil ) + (!pair "funcdef_number" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 91) + nil ) + (!pair "fde_index" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 92) + nil ) + (!pair "drap_reg" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 95) + nil ) + (!pair "vdrap_reg" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 97) + nil ) + (!pair "all_throwers_are_sibcalls" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 99) + nil ) + (!pair "uses_eh_lsda" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 100) + nil ) + (!pair "nothrow" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 101) + nil ) + (!pair "stack_realign" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 103) + nil ) + (!pair "drap_reg_saved" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 105) + nil ) + (!pair "in_std_section" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 107) + nil ) + (!pair "second_in_std_section" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 110) + nil ) + (!pair "rule18" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 116) + nil ) + (!pair "ignored_debug" + (!type already_seen 2) + (!srcfileloc "dwarf2out.h" 118) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 304) + nil ) + (!pair "x_range_query" + (!type pointer 515 nil gc_unused + (!type struct 516 + (!type already_seen 515) + gc_unused "range_query" + (!srcfileloc "function.h" 310) + (!fields 0 ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.h" 310) + (!options + (!option skip string "") + ) + ) + (!pair "last_stmt_uid" + (!type already_seen 2) + (!srcfileloc "function.h" 313) + nil ) + (!pair "debug_marker_count" + (!type already_seen 2) + (!srcfileloc "function.h" 319) + nil ) + (!pair "funcdef_no" + (!type already_seen 2) + (!srcfileloc "function.h" 322) + nil ) + (!pair "function_start_locus" + (!type already_seen 2) + 
(!srcfileloc "function.h" 325) + nil ) + (!pair "function_end_locus" + (!type already_seen 2) + (!srcfileloc "function.h" 328) + nil ) + (!pair "curr_properties" + (!type already_seen 2) + (!srcfileloc "function.h" 331) + nil ) + (!pair "last_verified" + (!type already_seen 2) + (!srcfileloc "function.h" 332) + nil ) + (!pair "pending_TODOs" + (!type already_seen 2) + (!srcfileloc "function.h" 339) + nil ) + (!pair "cannot_be_copied_reason" + (!type already_seen 11) + (!srcfileloc "function.h" 344) + (!options + (!option skip string "") + ) + ) + (!pair "last_clique" + (!type already_seen 2) + (!srcfileloc "function.h" 347) + nil ) + (!pair "va_list_gpr_size" + (!type already_seen 2) + (!srcfileloc "function.h" 354) + nil ) + (!pair "va_list_fpr_size" + (!type already_seen 2) + (!srcfileloc "function.h" 358) + nil ) + (!pair "calls_setjmp" + (!type already_seen 2) + (!srcfileloc "function.h" 361) + nil ) + (!pair "calls_alloca" + (!type already_seen 2) + (!srcfileloc "function.h" 365) + nil ) + (!pair "calls_eh_return" + (!type already_seen 2) + (!srcfileloc "function.h" 368) + nil ) + (!pair "has_nonlocal_label" + (!type already_seen 2) + (!srcfileloc "function.h" 372) + nil ) + (!pair "has_forced_label_in_static" + (!type already_seen 2) + (!srcfileloc "function.h" 376) + nil ) + (!pair "cannot_be_copied_set" + (!type already_seen 2) + (!srcfileloc "function.h" 381) + nil ) + (!pair "stdarg" + (!type already_seen 2) + (!srcfileloc "function.h" 384) + nil ) + (!pair "after_inlining" + (!type already_seen 2) + (!srcfileloc "function.h" 386) + nil ) + (!pair "always_inline_functions_inlined" + (!type already_seen 2) + (!srcfileloc "function.h" 387) + nil ) + (!pair "can_throw_non_call_exceptions" + (!type already_seen 2) + (!srcfileloc "function.h" 391) + nil ) + (!pair "can_delete_dead_exceptions" + (!type already_seen 2) + (!srcfileloc "function.h" 395) + nil ) + (!pair "returns_struct" + (!type already_seen 2) + (!srcfileloc "function.h" 402) + nil ) + (!pair 
"returns_pcc_struct" + (!type already_seen 2) + (!srcfileloc "function.h" 406) + nil ) + (!pair "has_local_explicit_reg_vars" + (!type already_seen 2) + (!srcfileloc "function.h" 410) + nil ) + (!pair "is_thunk" + (!type already_seen 2) + (!srcfileloc "function.h" 416) + nil ) + (!pair "has_force_vectorize_loops" + (!type already_seen 2) + (!srcfileloc "function.h" 420) + nil ) + (!pair "has_simduid_loops" + (!type already_seen 2) + (!srcfileloc "function.h" 424) + nil ) + (!pair "tail_call_marked" + (!type already_seen 2) + (!srcfileloc "function.h" 427) + nil ) + (!pair "has_unroll" + (!type already_seen 2) + (!srcfileloc "function.h" 430) + nil ) + (!pair "debug_nonbind_markers" + (!type already_seen 2) + (!srcfileloc "function.h" 434) + nil ) + (!pair "coroutine_component" + (!type already_seen 2) + (!srcfileloc "function.h" 437) + nil ) + (!pair "has_omp_target" + (!type already_seen 2) + (!srcfileloc "function.h" 440) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 1931) + nil ) + (!pair "arguments" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1934) + nil ) + (!pair "personality" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1936) + nil ) + (!pair "function_specific_target" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1939) + nil ) + (!pair "function_specific_optimization" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1940) + nil ) + (!pair "saved_tree" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1943) + nil ) + (!pair "vindex" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1945) + nil ) + (!pair "function_code" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1948) + nil ) + (!pair "built_in_class" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1950) + nil ) + (!pair "static_ctor_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1951) + nil ) + (!pair "static_dtor_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1952) + nil ) + (!pair "uninlinable" + 
(!type already_seen 2) + (!srcfileloc "tree-core.h" 1953) + nil ) + (!pair "possibly_inlined" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1954) + nil ) + (!pair "novops_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1955) + nil ) + (!pair "returns_twice_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1956) + nil ) + (!pair "malloc_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1958) + nil ) + (!pair "declared_inline_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1959) + nil ) + (!pair "no_inline_warning_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1960) + nil ) + (!pair "no_instrument_function_entry_exit" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1961) + nil ) + (!pair "no_limit_stack" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1962) + nil ) + (!pair "disregard_inline_limits" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1963) + nil ) + (!pair "pure_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1964) + nil ) + (!pair "looping_const_or_pure_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1965) + nil ) + (!pair "decl_type" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1968) + nil ) + (!pair "has_debug_args_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1969) + nil ) + (!pair "versioned_function" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1970) + nil ) + (!pair "replaceable_operator" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1971) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2067) + (!options + (!option tag string "TS_FUNCTION_DECL") + ) + ) + (!pair "translation_unit_decl" + (!type struct 517 nil gc_used "tree_translation_unit_decl" + (!srcfileloc "tree-core.h" 1983) + (!fields 2 + (!pair "common" + (!type already_seen 53) + (!srcfileloc "tree-core.h" 1978) + nil ) + (!pair "language" + (!type already_seen 11) + (!srcfileloc "tree-core.h" 1980) + nil ) + ) + nil 1023 nil nil ) 
+ (!srcfileloc "tree-core.h" 2069) + (!options + (!option tag string "TS_TRANSLATION_UNIT_DECL") + ) + ) + (!pair "type_common" + (!type struct 518 nil gc_used "tree_type_common" + (!srcfileloc "tree-core.h" 1717) + (!fields 36 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1666) + nil ) + (!pair "size" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1667) + nil ) + (!pair "size_unit" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1668) + nil ) + (!pair "attributes" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1669) + nil ) + (!pair "uid" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1670) + nil ) + (!pair "precision" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1672) + nil ) + (!pair "no_force_blk_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1673) + nil ) + (!pair "needs_constructing_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1674) + nil ) + (!pair "transparent_aggr_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1675) + nil ) + (!pair "restrict_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1676) + nil ) + (!pair "contains_placeholder_bits" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1677) + nil ) + (!pair "mode" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1679) + nil ) + (!pair "string_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1683) + nil ) + (!pair "lang_flag_0" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1684) + nil ) + (!pair "lang_flag_1" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1685) + nil ) + (!pair "lang_flag_2" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1686) + nil ) + (!pair "lang_flag_3" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1687) + nil ) + (!pair "lang_flag_4" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1688) + nil ) + (!pair "lang_flag_5" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1689) + nil ) + (!pair "lang_flag_6" + 
(!type already_seen 2) + (!srcfileloc "tree-core.h" 1690) + nil ) + (!pair "lang_flag_7" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1691) + nil ) + (!pair "align" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1698) + nil ) + (!pair "warn_if_not_align" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1699) + nil ) + (!pair "typeless_storage" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1700) + nil ) + (!pair "empty_flag" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1701) + nil ) + (!pair "indivisible_p" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1702) + nil ) + (!pair "spare" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1703) + nil ) + (!pair "alias_set" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1705) + nil ) + (!pair "pointer_to" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1706) + nil ) + (!pair "reference_to" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1707) + nil ) + (!pair "symtab" + (!type union 519 nil gc_used "tree_type_symtab" + (!srcfileloc "tree-core.h" 1711) + (!fields 2 + (!pair "address" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1709) + (!options + (!option tag string "TYPE_SYMTAB_IS_ADDRESS") + ) + ) + (!pair "die" + (!type already_seen 487) + (!srcfileloc "tree-core.h" 1710) + (!options + (!option tag string "TYPE_SYMTAB_IS_DIE") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "tree-core.h" 1711) + (!options + (!option desc string "debug_hooks->tree_type_symtab_field") + ) + ) + (!pair "canonical" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1712) + nil ) + (!pair "next_variant" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1713) + nil ) + (!pair "main_variant" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1714) + nil ) + (!pair "context" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1715) + nil ) + (!pair "name" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1716) + nil ) + ) + nil 1023 nil nil ) + 
(!srcfileloc "tree-core.h" 2070) + (!options + (!option tag string "TS_TYPE_COMMON") + ) + ) + (!pair "type_with_lang_specific" + (!type struct 520 nil gc_used "tree_type_with_lang_specific" + (!srcfileloc "tree-core.h" 1723) + (!fields 2 + (!pair "common" + (!type already_seen 518) + (!srcfileloc "tree-core.h" 1720) + nil ) + (!pair "lang_specific" + (!type pointer 521 nil gc_used + (!type lang_struct 522 + (!type already_seen 521) + gc_pointed_to "lang_type" + (!srcfileloc "ada/gcc-interface/ada-tree.h" 36) + (!fields 0 ) + nil 1023 + (!homotypes 8 + (!type struct 523 nil gc_pointed_to "lang_type" + (!srcfileloc "lto/lto-tree.h" 37) + (!fields 1 + (!pair "dummy" + (!type already_seen 2) + (!srcfileloc "lto/lto-tree.h" 36) + nil ) + ) + nil 128 + (!type already_seen 522) + nil ) + + (!type struct 524 nil gc_pointed_to "lang_type" + (!srcfileloc "jit/dummy-frontend.cc" 490) + (!fields 1 + (!pair "dummy" + (!type already_seen 8) + (!srcfileloc "jit/dummy-frontend.cc" 489) + nil ) + ) + nil 64 + (!type already_seen 522) + nil ) + + (!type struct 525 nil gc_pointed_to "lang_type" + (!srcfileloc "go/go-lang.cc" 51) + (!fields 1 + (!pair "dummy" + (!type already_seen 8) + (!srcfileloc "go/go-lang.cc" 50) + nil ) + ) + nil 32 + (!type already_seen 522) + nil ) + + (!type struct 526 nil gc_pointed_to "lang_type" + (!srcfileloc "fortran/trans.h" 1011) + (!fields 14 + (!pair "rank" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 998) + nil ) + (!pair "corank" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 998) + nil ) + (!pair "akind" + (!type already_seen 2) + (!srcfileloc "fortran/trans.h" 999) + nil ) + (!pair "lbound" + (!type array 527 nil gc_used "GFC_MAX_DIMENSIONS" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans.h" 1000) + nil ) + (!pair "ubound" + (!type array 528 nil gc_used "GFC_MAX_DIMENSIONS" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans.h" 1001) + nil ) + (!pair "stride" + (!type array 529 nil gc_used 
"GFC_MAX_DIMENSIONS" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans.h" 1002) + nil ) + (!pair "size" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1003) + nil ) + (!pair "offset" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1004) + nil ) + (!pair "dtype" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1005) + nil ) + (!pair "dataptr_type" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1006) + nil ) + (!pair "base_decl" + (!type array 530 nil gc_used "2" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans.h" 1007) + nil ) + (!pair "nonrestricted_type" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1008) + nil ) + (!pair "caf_token" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1009) + nil ) + (!pair "caf_offset" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 1010) + nil ) + ) + nil 16 + (!type already_seen 522) + nil ) + + (!type struct 531 nil gc_pointed_to "lang_type" + (!srcfileloc "d/d-tree.h" 319) + (!fields 1 + (!pair "type" + (!type pointer 532 nil gc_unused + (!type struct 533 + (!type already_seen 532) + gc_unused "Type" + (!srcfileloc "d/d-tree.h" 318) + (!fields 0 ) + nil 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 318) + (!options + (!option skip string "") + ) + ) + ) + nil 8 + (!type already_seen 522) + nil ) + + (!type struct 534 nil gc_pointed_to "lang_type" + (!srcfileloc "cp/cp-tree.h" 2395) + (!fields 67 + (!pair "align" + (!type already_seen 8) + (!srcfileloc "cp/cp-tree.h" 2307) + nil ) + (!pair "has_type_conversion" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2309) + nil ) + (!pair "has_copy_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2310) + nil ) + (!pair "has_default_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2311) + nil ) + (!pair "const_needs_init" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2312) + nil ) + (!pair "ref_needs_init" + (!type already_seen 2) + (!srcfileloc 
"cp/cp-tree.h" 2313) + nil ) + (!pair "has_const_copy_assign" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2314) + nil ) + (!pair "use_template" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2315) + nil ) + (!pair "has_mutable" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2317) + nil ) + (!pair "com_interface" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2318) + nil ) + (!pair "non_pod_class" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2319) + nil ) + (!pair "nearly_empty_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2320) + nil ) + (!pair "user_align" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2321) + nil ) + (!pair "has_copy_assign" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2322) + nil ) + (!pair "has_new" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2323) + nil ) + (!pair "has_array_new" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2324) + nil ) + (!pair "gets_delete" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2326) + nil ) + (!pair "interface_only" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2327) + nil ) + (!pair "interface_unknown" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2328) + nil ) + (!pair "contains_empty_class_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2329) + nil ) + (!pair "anon_aggr" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2330) + nil ) + (!pair "non_zero_init" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2331) + nil ) + (!pair "empty_p" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2332) + nil ) + (!pair "vec_new_uses_cookie" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2335) + nil ) + (!pair "declared_class" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2336) + nil ) + (!pair "diamond_shaped" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2337) + nil ) + (!pair "repeated_base" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 
2338) + nil ) + (!pair "being_defined" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2339) + nil ) + (!pair "debug_requested" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2340) + nil ) + (!pair "fields_readonly" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2341) + nil ) + (!pair "ptrmemfunc_flag" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2342) + nil ) + (!pair "lazy_default_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2344) + nil ) + (!pair "lazy_copy_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2345) + nil ) + (!pair "lazy_copy_assign" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2346) + nil ) + (!pair "lazy_destructor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2347) + nil ) + (!pair "has_const_copy_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2348) + nil ) + (!pair "has_complex_copy_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2349) + nil ) + (!pair "has_complex_copy_assign" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2350) + nil ) + (!pair "non_aggregate" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2351) + nil ) + (!pair "has_complex_dflt" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2353) + nil ) + (!pair "has_list_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2354) + nil ) + (!pair "non_std_layout" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2355) + nil ) + (!pair "is_literal" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2356) + nil ) + (!pair "lazy_move_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2357) + nil ) + (!pair "lazy_move_assign" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2358) + nil ) + (!pair "has_complex_move_ctor" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2359) + nil ) + (!pair "has_complex_move_assign" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2360) + nil ) + (!pair "has_constexpr_ctor" + (!type 
already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2362) + nil ) + (!pair "unique_obj_representations" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2363) + nil ) + (!pair "unique_obj_representations_set" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2364) + nil ) + (!pair "erroneous" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2365) + nil ) + (!pair "non_pod_aggregate" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2366) + nil ) + (!pair "dummy" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 2375) + nil ) + (!pair "primary_base" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2377) + nil ) + (!pair "vcall_indices" + (!type pointer 535 nil gc_used + (!type user_struct 536 + (!type already_seen 535) + gc_pointed_to "vec" + (!srcfileloc "cp/cp-tree.h" 2378) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/cp-tree.h" 2378) + nil ) + (!pair "tree_pair_s" + (!type struct 537 + (!type pointer 538 nil gc_used + (!type already_seen 537) + ) + gc_pointed_to "tree_pair_s" + (!srcfileloc "cp/cp-tree.h" 2291) + (!fields 2 + (!pair "purpose" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2288) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2289) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 2378) + nil ) + ) + ) + ) + (!srcfileloc "cp/cp-tree.h" 2378) + nil ) + (!pair "vtables" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2379) + nil ) + (!pair "typeinfo_var" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2380) + nil ) + (!pair "vbases" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 2381) + nil ) + (!pair "as_base" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2382) + nil ) + (!pair "pure_virtuals" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 2383) + nil ) + (!pair "friend_classes" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2384) + nil ) + (!pair "members" + (!type already_seen 84) + 
(!srcfileloc "cp/cp-tree.h" 2385) + (!options + (!option reorder string "resort_type_member_vec") + ) + ) + (!pair "key_method" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2386) + nil ) + (!pair "decl_list" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2387) + nil ) + (!pair "befriending_classes" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2388) + nil ) + (!pair "objc_info" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2392) + nil ) + (!pair "lambda_expr" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 2394) + nil ) + ) + nil 516 + (!type already_seen 522) + nil ) + + (!type struct 539 nil gc_pointed_to "lang_type" + (!srcfileloc "c/c-lang.h" 42) + (!fields 4 + (!pair "s" + (!type pointer 540 nil gc_used + (!type struct 541 + (!type already_seen 540) + gc_pointed_to "sorted_fields_type" + (!srcfileloc "c/c-lang.h" 30) + (!fields 2 + (!pair "len" + (!type already_seen 2) + (!srcfileloc "c/c-lang.h" 28) + nil ) + (!pair "elts" + (!type array 542 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "c/c-lang.h" 29) + (!options + (!option length string "%h.len") + ) + ) + ) + nil 258 nil nil ) + ) + (!srcfileloc "c/c-lang.h" 34) + (!options + (!option reorder string "resort_sorted_fields") + ) + ) + (!pair "enum_min" + (!type already_seen 23) + (!srcfileloc "c/c-lang.h" 36) + nil ) + (!pair "enum_max" + (!type already_seen 23) + (!srcfileloc "c/c-lang.h" 37) + nil ) + (!pair "objc_info" + (!type already_seen 23) + (!srcfileloc "c/c-lang.h" 41) + nil ) + ) + nil 258 + (!type already_seen 522) + nil ) + + (!type struct 543 nil gc_pointed_to "lang_type" + (!srcfileloc "ada/gcc-interface/ada-tree.h" 36) + (!fields 2 + (!pair "t1" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/ada-tree.h" 36) + nil ) + (!pair "t2" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/ada-tree.h" 36) + nil ) + ) + nil 1 + (!type already_seen 522) + nil ) + ) + ) + ) + (!srcfileloc "tree-core.h" 1722) + nil ) + ) + 
nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2072) + (!options + (!option tag string "TS_TYPE_WITH_LANG_SPECIFIC") + ) + ) + (!pair "type_non_common" + (!type struct 544 nil gc_used "tree_type_non_common" + (!srcfileloc "tree-core.h" 1731) + (!fields 5 + (!pair "with_lang_specific" + (!type already_seen 520) + (!srcfileloc "tree-core.h" 1726) + nil ) + (!pair "values" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1727) + nil ) + (!pair "minval" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1728) + nil ) + (!pair "maxval" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1729) + nil ) + (!pair "lang_1" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1730) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2074) + (!options + (!option tag string "TS_TYPE_NON_COMMON") + ) + ) + (!pair "list" + (!type struct 545 nil gc_used "tree_list" + (!srcfileloc "tree-core.h" 1503) + (!fields 3 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1500) + nil ) + (!pair "purpose" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1501) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1502) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2075) + (!options + (!option tag string "TS_LIST") + ) + ) + (!pair "vec" + (!type struct 546 nil gc_used "tree_vec" + (!srcfileloc "tree-core.h" 1508) + (!fields 2 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1506) + nil ) + (!pair "a" + (!type array 547 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "tree-core.h" 1507) + (!options + (!option length string "TREE_VEC_LENGTH ((tree)&%h)") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2076) + (!options + (!option tag string "TS_VEC") + ) + ) + (!pair "exp" + (!type struct 548 nil gc_used "tree_exp" + (!srcfileloc "tree-core.h" 1569) + (!fields 3 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1564) + nil ) + 
(!pair "locus" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1565) + nil ) + (!pair "operands" + (!type union 549 nil gc_used "tree_exp_subunion" + (!srcfileloc "tree-core.h" 1568) + (!fields 1 + (!pair "" + (!type array 550 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "gengtype.cc" 1408) + (!options + (!option default string "") + (!option length string "TREE_OPERAND_LENGTH ((tree) &%0)") + (!option dot string "") + ) + ) + ) + (!options + (!option dot string "") + ) + 1023 nil ) + (!srcfileloc "tree-core.h" 1568) + (!options + (!option desc string "TREE_CODE ((tree) &%0)") + (!option special string "tree_exp") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2077) + (!options + (!option tag string "TS_EXP") + ) + ) + (!pair "ssa_name" + (!type struct 551 nil gc_used "tree_ssa_name" + (!srcfileloc "tree-core.h" 1605) + (!fields 5 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1586) + nil ) + (!pair "var" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1589) + nil ) + (!pair "def_stmt" + (!type already_seen 281) + (!srcfileloc "tree-core.h" 1592) + nil ) + (!pair "info" + (!type union 552 nil gc_used "ssa_name_info_type" + (!srcfileloc "tree-core.h" 1600) + (!fields 2 + (!pair "ptr_info" + (!type pointer 553 nil gc_used + (!type struct 554 + (!type already_seen 553) + gc_pointed_to "ptr_info_def" + (!srcfileloc "tree-ssanames.h" 46) + (!fields 3 + (!pair "pt" + (!type already_seen 385) + (!srcfileloc "tree-ssanames.h" 28) + nil ) + (!pair "align" + (!type already_seen 2) + (!srcfileloc "tree-ssanames.h" 40) + nil ) + (!pair "misalign" + (!type already_seen 2) + (!srcfileloc "tree-ssanames.h" 45) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 1597) + (!options + (!option tag string "0") + ) + ) + (!pair "range_info" + (!type pointer 555 nil gc_used + (!type struct 556 + (!type already_seen 555) + gc_pointed_to "range_info_def" + (!srcfileloc "tree-ssanames.h" 56) + (!fields 0 ) + 
(!options + (!option variable_size string "") + ) + 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 1599) + (!options + (!option tag string "1") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "tree-core.h" 1601) + (!options + (!option desc string "%1.typed.type ?!POINTER_TYPE_P (TREE_TYPE ((tree)&%1)) : 2") + ) + ) + (!pair "imm_uses" + (!type struct 557 + (!type pointer 558 nil gc_unused + (!type already_seen 557) + ) + gc_used "ssa_use_operand_t" + (!srcfileloc "tree-ssa-operands.h" 30) + (!fields 4 + (!pair "prev" + (!type already_seen 558) + (!srcfileloc "tree-core.h" 1574) + (!options + (!option skip string "") + ) + ) + (!pair "next" + (!type already_seen 558) + (!srcfileloc "tree-core.h" 1575) + (!options + (!option skip string "") + ) + ) + (!pair "loc" + (!type union 559 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-core.h:1581" + (!srcfileloc "tree-core.h" 1581) + (!fields 2 + (!pair "stmt" + (!type already_seen 281) + (!srcfileloc "tree-core.h" 1581) + nil ) + (!pair "ssa_name" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1581) + nil ) + ) + nil 1023 nil ) + (!srcfileloc "tree-core.h" 1581) + (!options + (!option skip string "") + ) + ) + (!pair "use" + (!type already_seen 24) + (!srcfileloc "tree-core.h" 1582) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1604) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2078) + (!options + (!option tag string "TS_SSA_NAME") + ) + ) + (!pair "block" + (!type struct 560 nil gc_used "tree_block" + (!srcfileloc "tree-core.h" 1663) + (!fields 13 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "tree-core.h" 1644) + nil ) + (!pair "chain" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1645) + nil ) + (!pair "block_num" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1647) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1649) + nil ) + (!pair "end_locus" + 
(!type already_seen 2) + (!srcfileloc "tree-core.h" 1650) + nil ) + (!pair "vars" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1652) + nil ) + (!pair "nonlocalized_vars" + (!type already_seen 84) + (!srcfileloc "tree-core.h" 1653) + nil ) + (!pair "subblocks" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1655) + nil ) + (!pair "supercontext" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1656) + nil ) + (!pair "abstract_origin" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1657) + nil ) + (!pair "fragment_origin" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1658) + nil ) + (!pair "fragment_chain" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1659) + nil ) + (!pair "die" + (!type already_seen 487) + (!srcfileloc "tree-core.h" 1662) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2079) + (!options + (!option tag string "TS_BLOCK") + ) + ) + (!pair "binfo" + (!type struct 561 nil gc_used "tree_binfo" + (!srcfileloc "tree-core.h" 1747) + (!fields 10 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1734) + nil ) + (!pair "offset" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1736) + nil ) + (!pair "vtable" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1737) + nil ) + (!pair "virtuals" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1738) + nil ) + (!pair "vptr_field" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1739) + nil ) + (!pair "base_accesses" + (!type already_seen 84) + (!srcfileloc "tree-core.h" 1740) + nil ) + (!pair "inheritance" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1741) + nil ) + (!pair "vtt_subvtt" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1743) + nil ) + (!pair "vtt_vptr" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1744) + nil ) + (!pair "base_binfos" + (!type already_seen 85) + (!srcfileloc "tree-core.h" 1746) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2080) + (!options 
+ (!option tag string "TS_BINFO") + ) + ) + (!pair "stmt_list" + (!type struct 562 nil gc_used "tree_statement_list" + (!srcfileloc "tree-core.h" 2002) + (!fields 3 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1999) + nil ) + (!pair "head" + (!type pointer 563 nil gc_used + (!type struct 564 + (!type already_seen 563) + gc_pointed_to "tree_statement_list_node" + (!srcfileloc "tree-core.h" 1995) + (!fields 3 + (!pair "prev" + (!type already_seen 563) + (!srcfileloc "tree-core.h" 1992) + nil ) + (!pair "next" + (!type already_seen 563) + (!srcfileloc "tree-core.h" 1993) + nil ) + (!pair "stmt" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1994) + nil ) + ) + (!options + (!option chain_prev string "%h.prev") + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 2000) + nil ) + (!pair "tail" + (!type already_seen 563) + (!srcfileloc "tree-core.h" 2001) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2081) + (!options + (!option tag string "TS_STATEMENT_LIST") + ) + ) + (!pair "constructor" + (!type struct 565 nil gc_used "tree_constructor" + (!srcfileloc "tree-core.h" 1522) + (!fields 2 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "tree-core.h" 1520) + nil ) + (!pair "elts" + (!type pointer 566 nil gc_used + (!type user_struct 567 + (!type already_seen 566) + gc_pointed_to "vec" + (!srcfileloc "tree-core.h" 1521) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "tree-core.h" 1521) + nil ) + (!pair "constructor_elt" + (!type struct 568 nil gc_used "constructor_elt" + (!srcfileloc "tree-core.h" 1521) + (!fields 2 + (!pair "index" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1515) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1516) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 1521) + nil ) + ) + ) + ) + (!srcfileloc "tree-core.h" 1521) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc 
"tree-core.h" 2082) + (!options + (!option tag string "TS_CONSTRUCTOR") + ) + ) + (!pair "omp_clause" + (!type struct 569 nil gc_used "tree_omp_clause" + (!srcfileloc "tree-core.h" 1641) + (!fields 7 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "tree-core.h" 1616) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1617) + nil ) + (!pair "code" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1618) + nil ) + (!pair "subcode" + (!type union 570 nil gc_unused "omp_clause_subcode" + (!srcfileloc "tree-core.h" 1632) + (!fields 11 + (!pair "default_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1620) + nil ) + (!pair "schedule_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1621) + nil ) + (!pair "depend_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1622) + nil ) + (!pair "map_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1624) + nil ) + (!pair "proc_bind_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1625) + nil ) + (!pair "reduction_code" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1626) + nil ) + (!pair "linear_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1627) + nil ) + (!pair "if_modifier" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1628) + nil ) + (!pair "defaultmap_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1629) + nil ) + (!pair "bind_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1630) + nil ) + (!pair "device_type_kind" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1631) + nil ) + ) + nil 1023 nil ) + (!srcfileloc "tree-core.h" 1632) + (!options + (!option skip string "") + ) + ) + (!pair "gimple_reduction_init" + (!type already_seen 281) + (!srcfileloc "tree-core.h" 1636) + nil ) + (!pair "gimple_reduction_merge" + (!type already_seen 281) + (!srcfileloc "tree-core.h" 1637) + nil ) + (!pair "ops" + (!type array 571 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc 
"tree-core.h" 1640) + (!options + (!option length string "omp_clause_num_ops[OMP_CLAUSE_CODE ((tree)&%h)]") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2083) + (!options + (!option tag string "TS_OMP_CLAUSE") + ) + ) + (!pair "optimization" + (!type struct 572 nil gc_used "tree_optimization_option" + (!srcfileloc "tree-core.h" 2020) + (!fields 4 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "tree-core.h" 2008) + nil ) + (!pair "opts" + (!type pointer 573 nil gc_used + (!type struct 574 + (!type already_seen 573) + gc_pointed_to "cl_optimization" + (!fileloc "options.h" 8735) + (!fields 496 + (!pair "x_str_align_functions" + (!type already_seen 11) + (!fileloc "options.h" 8238) + nil ) + (!pair "x_str_align_jumps" + (!type already_seen 11) + (!fileloc "options.h" 8239) + nil ) + (!pair "x_str_align_labels" + (!type already_seen 11) + (!fileloc "options.h" 8240) + nil ) + (!pair "x_str_align_loops" + (!type already_seen 11) + (!fileloc "options.h" 8241) + nil ) + (!pair "x_flag_patchable_function_entry" + (!type already_seen 11) + (!fileloc "options.h" 8242) + nil ) + (!pair "x_param_align_loop_iterations" + (!type already_seen 2) + (!fileloc "options.h" 8243) + nil ) + (!pair "x_param_align_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8244) + nil ) + (!pair "x_param_asan_protect_allocas" + (!type already_seen 2) + (!fileloc "options.h" 8245) + nil ) + (!pair "x_param_asan_instrument_reads" + (!type already_seen 2) + (!fileloc "options.h" 8246) + nil ) + (!pair "x_param_asan_instrument_writes" + (!type already_seen 2) + (!fileloc "options.h" 8247) + nil ) + (!pair "x_param_asan_instrumentation_with_call_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8248) + nil ) + (!pair "x_param_asan_memintrin" + (!type already_seen 2) + (!fileloc "options.h" 8249) + nil ) + (!pair "x_param_asan_stack" + (!type already_seen 2) + (!fileloc "options.h" 8250) + nil ) + (!pair "x_param_asan_use_after_return" + (!type already_seen 
2) + (!fileloc "options.h" 8251) + nil ) + (!pair "x_param_avg_loop_niter" + (!type already_seen 2) + (!fileloc "options.h" 8252) + nil ) + (!pair "x_param_avoid_fma_max_bits" + (!type already_seen 2) + (!fileloc "options.h" 8253) + nil ) + (!pair "x_param_builtin_expect_probability" + (!type already_seen 2) + (!fileloc "options.h" 8254) + nil ) + (!pair "x_param_builtin_string_cmp_inline_length" + (!type already_seen 2) + (!fileloc "options.h" 8255) + nil ) + (!pair "x_param_case_values_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8256) + nil ) + (!pair "x_param_comdat_sharing_probability" + (!type already_seen 2) + (!fileloc "options.h" 8257) + nil ) + (!pair "x_param_construct_interfere_size" + (!type already_seen 2) + (!fileloc "options.h" 8258) + nil ) + (!pair "x_param_destruct_interfere_size" + (!type already_seen 2) + (!fileloc "options.h" 8259) + nil ) + (!pair "x_param_dse_max_alias_queries_per_store" + (!type already_seen 2) + (!fileloc "options.h" 8260) + nil ) + (!pair "x_param_dse_max_object_size" + (!type already_seen 2) + (!fileloc "options.h" 8261) + nil ) + (!pair "x_param_early_inlining_insns" + (!type already_seen 2) + (!fileloc "options.h" 8262) + nil ) + (!pair "x_param_evrp_sparse_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8263) + nil ) + (!pair "x_param_evrp_switch_limit" + (!type already_seen 2) + (!fileloc "options.h" 8264) + nil ) + (!pair "x_param_fsm_scale_path_blocks" + (!type already_seen 2) + (!fileloc "options.h" 8265) + nil ) + (!pair "x_param_fsm_scale_path_stmts" + (!type already_seen 2) + (!fileloc "options.h" 8266) + nil ) + (!pair "x_param_gcse_after_reload_critical_fraction" + (!type already_seen 2) + (!fileloc "options.h" 8267) + nil ) + (!pair "x_param_gcse_after_reload_partial_fraction" + (!type already_seen 2) + (!fileloc "options.h" 8268) + nil ) + (!pair "x_param_gcse_cost_distance_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8269) + nil ) + (!pair 
"x_param_gcse_unrestricted_cost" + (!type already_seen 2) + (!fileloc "options.h" 8270) + nil ) + (!pair "x_param_graphite_max_arrays_per_scop" + (!type already_seen 2) + (!fileloc "options.h" 8271) + nil ) + (!pair "x_param_graphite_max_nb_scop_params" + (!type already_seen 2) + (!fileloc "options.h" 8272) + nil ) + (!pair "x_param_hwasan_instrument_allocas" + (!type already_seen 2) + (!fileloc "options.h" 8273) + nil ) + (!pair "x_param_hwasan_instrument_mem_intrinsics" + (!type already_seen 2) + (!fileloc "options.h" 8274) + nil ) + (!pair "x_param_hwasan_instrument_reads" + (!type already_seen 2) + (!fileloc "options.h" 8275) + nil ) + (!pair "x_param_hwasan_instrument_stack" + (!type already_seen 2) + (!fileloc "options.h" 8276) + nil ) + (!pair "x_param_hwasan_instrument_writes" + (!type already_seen 2) + (!fileloc "options.h" 8277) + nil ) + (!pair "x_param_hwasan_random_frame_tag" + (!type already_seen 2) + (!fileloc "options.h" 8278) + nil ) + (!pair "x_param_inline_heuristics_hint_percent" + (!type already_seen 2) + (!fileloc "options.h" 8279) + nil ) + (!pair "x_param_inline_min_speedup" + (!type already_seen 2) + (!fileloc "options.h" 8280) + nil ) + (!pair "x_param_inline_unit_growth" + (!type already_seen 2) + (!fileloc "options.h" 8281) + nil ) + (!pair "x_param_ipa_cp_eval_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8282) + nil ) + (!pair "x_param_ipa_cp_large_unit_insns" + (!type already_seen 2) + (!fileloc "options.h" 8283) + nil ) + (!pair "x_param_ipa_cp_loop_hint_bonus" + (!type already_seen 2) + (!fileloc "options.h" 8284) + nil ) + (!pair "x_param_ipa_cp_max_recursive_depth" + (!type already_seen 2) + (!fileloc "options.h" 8285) + nil ) + (!pair "x_param_ipa_cp_min_recursive_probability" + (!type already_seen 2) + (!fileloc "options.h" 8286) + nil ) + (!pair "x_param_ipa_cp_profile_count_base" + (!type already_seen 2) + (!fileloc "options.h" 8287) + nil ) + (!pair "x_param_ipa_cp_recursion_penalty" + (!type already_seen 2) + 
(!fileloc "options.h" 8288) + nil ) + (!pair "x_param_ipa_cp_recursive_freq_factor" + (!type already_seen 2) + (!fileloc "options.h" 8289) + nil ) + (!pair "x_param_ipa_cp_single_call_penalty" + (!type already_seen 2) + (!fileloc "options.h" 8290) + nil ) + (!pair "x_param_ipa_cp_unit_growth" + (!type already_seen 2) + (!fileloc "options.h" 8291) + nil ) + (!pair "x_param_ipa_cp_value_list_size" + (!type already_seen 2) + (!fileloc "options.h" 8292) + nil ) + (!pair "x_param_ipa_jump_function_lookups" + (!type already_seen 2) + (!fileloc "options.h" 8293) + nil ) + (!pair "x_param_ipa_max_aa_steps" + (!type already_seen 2) + (!fileloc "options.h" 8294) + nil ) + (!pair "x_param_ipa_max_agg_items" + (!type already_seen 2) + (!fileloc "options.h" 8295) + nil ) + (!pair "x_param_ipa_max_loop_predicates" + (!type already_seen 2) + (!fileloc "options.h" 8296) + nil ) + (!pair "x_param_ipa_max_param_expr_ops" + (!type already_seen 2) + (!fileloc "options.h" 8297) + nil ) + (!pair "x_param_ipa_max_switch_predicate_bounds" + (!type already_seen 2) + (!fileloc "options.h" 8298) + nil ) + (!pair "x_param_ipa_sra_max_replacements" + (!type already_seen 2) + (!fileloc "options.h" 8299) + nil ) + (!pair "x_param_ipa_sra_ptr_growth_factor" + (!type already_seen 2) + (!fileloc "options.h" 8300) + nil ) + (!pair "x_param_ira_consider_dup_in_all_alts" + (!type already_seen 2) + (!fileloc "options.h" 8301) + nil ) + (!pair "x_param_ira_loop_reserved_regs" + (!type already_seen 2) + (!fileloc "options.h" 8302) + nil ) + (!pair "x_param_ira_max_conflict_table_size" + (!type already_seen 2) + (!fileloc "options.h" 8303) + nil ) + (!pair "x_param_ira_max_loops_num" + (!type already_seen 2) + (!fileloc "options.h" 8304) + nil ) + (!pair "x_param_iv_always_prune_cand_set_bound" + (!type already_seen 2) + (!fileloc "options.h" 8305) + nil ) + (!pair "x_param_iv_consider_all_candidates_bound" + (!type already_seen 2) + (!fileloc "options.h" 8306) + nil ) + (!pair 
"x_param_iv_max_considered_uses" + (!type already_seen 2) + (!fileloc "options.h" 8307) + nil ) + (!pair "x_param_jump_table_max_growth_ratio_for_size" + (!type already_seen 2) + (!fileloc "options.h" 8308) + nil ) + (!pair "x_param_jump_table_max_growth_ratio_for_speed" + (!type already_seen 2) + (!fileloc "options.h" 8309) + nil ) + (!pair "x_param_l1_cache_line_size" + (!type already_seen 2) + (!fileloc "options.h" 8310) + nil ) + (!pair "x_param_l1_cache_size" + (!type already_seen 2) + (!fileloc "options.h" 8311) + nil ) + (!pair "x_param_l2_cache_size" + (!type already_seen 2) + (!fileloc "options.h" 8312) + nil ) + (!pair "x_param_large_function_growth" + (!type already_seen 2) + (!fileloc "options.h" 8313) + nil ) + (!pair "x_param_large_function_insns" + (!type already_seen 2) + (!fileloc "options.h" 8314) + nil ) + (!pair "x_param_stack_frame_growth" + (!type already_seen 2) + (!fileloc "options.h" 8315) + nil ) + (!pair "x_param_large_stack_frame" + (!type already_seen 2) + (!fileloc "options.h" 8316) + nil ) + (!pair "x_param_large_unit_insns" + (!type already_seen 2) + (!fileloc "options.h" 8317) + nil ) + (!pair "x_param_lim_expensive" + (!type already_seen 2) + (!fileloc "options.h" 8318) + nil ) + (!pair "x_param_loop_block_tile_size" + (!type already_seen 2) + (!fileloc "options.h" 8319) + nil ) + (!pair "x_param_loop_interchange_max_num_stmts" + (!type already_seen 2) + (!fileloc "options.h" 8320) + nil ) + (!pair "x_param_loop_interchange_stride_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8321) + nil ) + (!pair "x_param_loop_invariant_max_bbs_in_loop" + (!type already_seen 2) + (!fileloc "options.h" 8322) + nil ) + (!pair "x_param_loop_max_datarefs_for_datadeps" + (!type already_seen 2) + (!fileloc "options.h" 8323) + nil ) + (!pair "x_param_loop_versioning_max_inner_insns" + (!type already_seen 2) + (!fileloc "options.h" 8324) + nil ) + (!pair "x_param_loop_versioning_max_outer_insns" + (!type already_seen 2) + (!fileloc "options.h" 
8325) + nil ) + (!pair "x_param_lra_inheritance_ebb_probability_cutoff" + (!type already_seen 2) + (!fileloc "options.h" 8326) + nil ) + (!pair "x_param_lra_max_considered_reload_pseudos" + (!type already_seen 2) + (!fileloc "options.h" 8327) + nil ) + (!pair "x_param_max_average_unrolled_insns" + (!type already_seen 2) + (!fileloc "options.h" 8328) + nil ) + (!pair "x_param_max_combine_insns" + (!type already_seen 2) + (!fileloc "options.h" 8329) + nil ) + (!pair "x_param_max_unroll_iterations" + (!type already_seen 2) + (!fileloc "options.h" 8330) + nil ) + (!pair "x_param_max_completely_peel_times" + (!type already_seen 2) + (!fileloc "options.h" 8331) + nil ) + (!pair "x_param_max_completely_peeled_insns" + (!type already_seen 2) + (!fileloc "options.h" 8332) + nil ) + (!pair "x_param_max_crossjump_edges" + (!type already_seen 2) + (!fileloc "options.h" 8333) + nil ) + (!pair "x_param_max_cse_insns" + (!type already_seen 2) + (!fileloc "options.h" 8334) + nil ) + (!pair "x_param_max_cse_path_length" + (!type already_seen 2) + (!fileloc "options.h" 8335) + nil ) + (!pair "x_param_max_cselib_memory_locations" + (!type already_seen 2) + (!fileloc "options.h" 8336) + nil ) + (!pair "x_param_max_debug_marker_count" + (!type already_seen 2) + (!fileloc "options.h" 8337) + nil ) + (!pair "x_param_max_delay_slot_insn_search" + (!type already_seen 2) + (!fileloc "options.h" 8338) + nil ) + (!pair "x_param_max_delay_slot_live_search" + (!type already_seen 2) + (!fileloc "options.h" 8339) + nil ) + (!pair "x_param_max_dse_active_local_stores" + (!type already_seen 2) + (!fileloc "options.h" 8340) + nil ) + (!pair "x_param_early_inliner_max_iterations" + (!type already_seen 2) + (!fileloc "options.h" 8341) + nil ) + (!pair "x_param_max_find_base_term_values" + (!type already_seen 2) + (!fileloc "options.h" 8342) + nil ) + (!pair "x_param_max_fsm_thread_length" + (!type already_seen 2) + (!fileloc "options.h" 8343) + nil ) + (!pair "x_param_max_fsm_thread_path_insns" + 
(!type already_seen 2) + (!fileloc "options.h" 8344) + nil ) + (!pair "x_param_max_gcse_insertion_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8345) + nil ) + (!pair "x_param_max_gcse_memory" + (!type already_seen 2) + (!fileloc "options.h" 8346) + nil ) + (!pair "x_param_max_goto_duplication_insns" + (!type already_seen 2) + (!fileloc "options.h" 8347) + nil ) + (!pair "x_param_max_grow_copy_bb_insns" + (!type already_seen 2) + (!fileloc "options.h" 8348) + nil ) + (!pair "x_param_max_hoist_depth" + (!type already_seen 2) + (!fileloc "options.h" 8349) + nil ) + (!pair "x_param_inline_functions_called_once_insns" + (!type already_seen 2) + (!fileloc "options.h" 8350) + nil ) + (!pair "x_param_inline_functions_called_once_loop_depth" + (!type already_seen 2) + (!fileloc "options.h" 8351) + nil ) + (!pair "x_param_max_inline_insns_auto" + (!type already_seen 2) + (!fileloc "options.h" 8352) + nil ) + (!pair "x_param_max_inline_insns_recursive_auto" + (!type already_seen 2) + (!fileloc "options.h" 8353) + nil ) + (!pair "x_param_max_inline_insns_recursive" + (!type already_seen 2) + (!fileloc "options.h" 8354) + nil ) + (!pair "x_param_max_inline_insns_single" + (!type already_seen 2) + (!fileloc "options.h" 8355) + nil ) + (!pair "x_param_max_inline_insns_size" + (!type already_seen 2) + (!fileloc "options.h" 8356) + nil ) + (!pair "x_param_max_inline_insns_small" + (!type already_seen 2) + (!fileloc "options.h" 8357) + nil ) + (!pair "x_param_max_inline_recursive_depth_auto" + (!type already_seen 2) + (!fileloc "options.h" 8358) + nil ) + (!pair "x_param_max_inline_recursive_depth" + (!type already_seen 2) + (!fileloc "options.h" 8359) + nil ) + (!pair "x_param_max_isl_operations" + (!type already_seen 2) + (!fileloc "options.h" 8360) + nil ) + (!pair "x_param_max_iterations_computation_cost" + (!type already_seen 2) + (!fileloc "options.h" 8361) + nil ) + (!pair "x_param_max_iterations_to_track" + (!type already_seen 2) + (!fileloc "options.h" 8362) + 
nil ) + (!pair "x_param_max_jump_thread_duplication_stmts" + (!type already_seen 2) + (!fileloc "options.h" 8363) + nil ) + (!pair "x_param_max_last_value_rtl" + (!type already_seen 2) + (!fileloc "options.h" 8364) + nil ) + (!pair "x_param_max_loop_header_insns" + (!type already_seen 2) + (!fileloc "options.h" 8365) + nil ) + (!pair "x_param_max_modulo_backtrack_attempts" + (!type already_seen 2) + (!fileloc "options.h" 8366) + nil ) + (!pair "x_param_max_partial_antic_length" + (!type already_seen 2) + (!fileloc "options.h" 8367) + nil ) + (!pair "x_param_max_peel_branches" + (!type already_seen 2) + (!fileloc "options.h" 8368) + nil ) + (!pair "x_param_max_peel_times" + (!type already_seen 2) + (!fileloc "options.h" 8369) + nil ) + (!pair "x_param_max_peeled_insns" + (!type already_seen 2) + (!fileloc "options.h" 8370) + nil ) + (!pair "x_param_max_pending_list_length" + (!type already_seen 2) + (!fileloc "options.h" 8371) + nil ) + (!pair "x_param_max_pipeline_region_blocks" + (!type already_seen 2) + (!fileloc "options.h" 8372) + nil ) + (!pair "x_param_max_pipeline_region_insns" + (!type already_seen 2) + (!fileloc "options.h" 8373) + nil ) + (!pair "x_param_max_pow_sqrt_depth" + (!type already_seen 2) + (!fileloc "options.h" 8374) + nil ) + (!pair "x_param_max_predicted_iterations" + (!type already_seen 2) + (!fileloc "options.h" 8375) + nil ) + (!pair "x_param_max_reload_search_insns" + (!type already_seen 2) + (!fileloc "options.h" 8376) + nil ) + (!pair "x_param_max_rtl_if_conversion_insns" + (!type already_seen 2) + (!fileloc "options.h" 8377) + nil ) + (!pair "x_param_max_rtl_if_conversion_predictable_cost" + (!type already_seen 2) + (!fileloc "options.h" 8378) + nil ) + (!pair "x_param_max_rtl_if_conversion_unpredictable_cost" + (!type already_seen 2) + (!fileloc "options.h" 8379) + nil ) + (!pair "x_param_max_sched_extend_regions_iters" + (!type already_seen 2) + (!fileloc "options.h" 8380) + nil ) + (!pair "x_param_max_sched_insn_conflict_delay" + 
(!type already_seen 2) + (!fileloc "options.h" 8381) + nil ) + (!pair "x_param_max_sched_ready_insns" + (!type already_seen 2) + (!fileloc "options.h" 8382) + nil ) + (!pair "x_param_max_sched_region_blocks" + (!type already_seen 2) + (!fileloc "options.h" 8383) + nil ) + (!pair "x_param_max_sched_region_insns" + (!type already_seen 2) + (!fileloc "options.h" 8384) + nil ) + (!pair "x_param_max_slsr_candidate_scan" + (!type already_seen 2) + (!fileloc "options.h" 8385) + nil ) + (!pair "x_param_max_speculative_devirt_maydefs" + (!type already_seen 2) + (!fileloc "options.h" 8386) + nil ) + (!pair "x_param_max_stores_to_merge" + (!type already_seen 2) + (!fileloc "options.h" 8387) + nil ) + (!pair "x_param_max_stores_to_sink" + (!type already_seen 2) + (!fileloc "options.h" 8388) + nil ) + (!pair "x_param_max_tail_merge_comparisons" + (!type already_seen 2) + (!fileloc "options.h" 8389) + nil ) + (!pair "x_param_max_tail_merge_iterations" + (!type already_seen 2) + (!fileloc "options.h" 8390) + nil ) + (!pair "x_param_max_tracked_strlens" + (!type already_seen 2) + (!fileloc "options.h" 8391) + nil ) + (!pair "x_param_max_tree_if_conversion_phi_args" + (!type already_seen 2) + (!fileloc "options.h" 8392) + nil ) + (!pair "x_param_max_unroll_times" + (!type already_seen 2) + (!fileloc "options.h" 8393) + nil ) + (!pair "x_param_max_unrolled_insns" + (!type already_seen 2) + (!fileloc "options.h" 8394) + nil ) + (!pair "x_param_max_unswitch_insns" + (!type already_seen 2) + (!fileloc "options.h" 8395) + nil ) + (!pair "x_param_max_unswitch_level" + (!type already_seen 2) + (!fileloc "options.h" 8396) + nil ) + (!pair "x_param_max_variable_expansions" + (!type already_seen 2) + (!fileloc "options.h" 8397) + nil ) + (!pair "x_param_max_vartrack_expr_depth" + (!type already_seen 2) + (!fileloc "options.h" 8398) + nil ) + (!pair "x_param_max_vartrack_reverse_op_size" + (!type already_seen 2) + (!fileloc "options.h" 8399) + nil ) + (!pair "x_param_max_vartrack_size" + 
(!type already_seen 2) + (!fileloc "options.h" 8400) + nil ) + (!pair "x_param_max_vrp_switch_assertions" + (!type already_seen 2) + (!fileloc "options.h" 8401) + nil ) + (!pair "x_param_min_crossjump_insns" + (!type already_seen 2) + (!fileloc "options.h" 8402) + nil ) + (!pair "x_param_min_inline_recursive_probability" + (!type already_seen 2) + (!fileloc "options.h" 8403) + nil ) + (!pair "x_param_min_insn_to_prefetch_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8404) + nil ) + (!pair "x_param_min_loop_cond_split_prob" + (!type already_seen 2) + (!fileloc "options.h" 8405) + nil ) + (!pair "x_param_min_pagesize" + (!type already_seen 2) + (!fileloc "options.h" 8406) + nil ) + (!pair "x_param_min_size_for_stack_sharing" + (!type already_seen 2) + (!fileloc "options.h" 8407) + nil ) + (!pair "x_param_min_spec_prob" + (!type already_seen 2) + (!fileloc "options.h" 8408) + nil ) + (!pair "x_param_min_vect_loop_bound" + (!type already_seen 2) + (!fileloc "options.h" 8409) + nil ) + (!pair "x_param_modref_max_accesses" + (!type already_seen 2) + (!fileloc "options.h" 8410) + nil ) + (!pair "x_param_modref_max_adjustments" + (!type already_seen 2) + (!fileloc "options.h" 8411) + nil ) + (!pair "x_param_modref_max_bases" + (!type already_seen 2) + (!fileloc "options.h" 8412) + nil ) + (!pair "x_param_modref_max_depth" + (!type already_seen 2) + (!fileloc "options.h" 8413) + nil ) + (!pair "x_param_modref_max_escape_points" + (!type already_seen 2) + (!fileloc "options.h" 8414) + nil ) + (!pair "x_param_modref_max_refs" + (!type already_seen 2) + (!fileloc "options.h" 8415) + nil ) + (!pair "x_param_modref_max_tests" + (!type already_seen 2) + (!fileloc "options.h" 8416) + nil ) + (!pair "x_param_parloops_chunk_size" + (!type already_seen 2) + (!fileloc "options.h" 8417) + nil ) + (!pair "x_param_parloops_min_per_thread" + (!type already_seen 2) + (!fileloc "options.h" 8418) + nil ) + (!pair "x_param_parloops_schedule" + (!type already_seen 2) + (!fileloc 
"options.h" 8419) + nil ) + (!pair "x_param_partial_inlining_entry_probability" + (!type already_seen 2) + (!fileloc "options.h" 8420) + nil ) + (!pair "x_param_predictable_branch_outcome" + (!type already_seen 2) + (!fileloc "options.h" 8421) + nil ) + (!pair "x_param_prefetch_dynamic_strides" + (!type already_seen 2) + (!fileloc "options.h" 8422) + nil ) + (!pair "x_param_prefetch_latency" + (!type already_seen 2) + (!fileloc "options.h" 8423) + nil ) + (!pair "x_param_prefetch_min_insn_to_mem_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8424) + nil ) + (!pair "x_param_prefetch_minimum_stride" + (!type already_seen 2) + (!fileloc "options.h" 8425) + nil ) + (!pair "x_param_ranger_logical_depth" + (!type already_seen 2) + (!fileloc "options.h" 8426) + nil ) + (!pair "x_param_relation_block_limit" + (!type already_seen 2) + (!fileloc "options.h" 8427) + nil ) + (!pair "x_param_rpo_vn_max_loop_depth" + (!type already_seen 2) + (!fileloc "options.h" 8428) + nil ) + (!pair "x_param_sccvn_max_alias_queries_per_access" + (!type already_seen 2) + (!fileloc "options.h" 8429) + nil ) + (!pair "x_param_scev_max_expr_complexity" + (!type already_seen 2) + (!fileloc "options.h" 8430) + nil ) + (!pair "x_param_scev_max_expr_size" + (!type already_seen 2) + (!fileloc "options.h" 8431) + nil ) + (!pair "x_param_sched_autopref_queue_depth" + (!type already_seen 2) + (!fileloc "options.h" 8432) + nil ) + (!pair "x_param_sched_mem_true_dep_cost" + (!type already_seen 2) + (!fileloc "options.h" 8433) + nil ) + (!pair "x_param_sched_pressure_algorithm" + (!type already_seen 2) + (!fileloc "options.h" 8434) + nil ) + (!pair "x_param_sched_spec_prob_cutoff" + (!type already_seen 2) + (!fileloc "options.h" 8435) + nil ) + (!pair "x_param_sched_state_edge_prob_cutoff" + (!type already_seen 2) + (!fileloc "options.h" 8436) + nil ) + (!pair "x_param_selsched_insns_to_rename" + (!type already_seen 2) + (!fileloc "options.h" 8437) + nil ) + (!pair "x_param_selsched_max_lookahead" 
+ (!type already_seen 2) + (!fileloc "options.h" 8438) + nil ) + (!pair "x_param_selsched_max_sched_times" + (!type already_seen 2) + (!fileloc "options.h" 8439) + nil ) + (!pair "x_param_simultaneous_prefetches" + (!type already_seen 2) + (!fileloc "options.h" 8440) + nil ) + (!pair "x_param_sink_frequency_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8441) + nil ) + (!pair "x_param_sms_dfa_history" + (!type already_seen 2) + (!fileloc "options.h" 8442) + nil ) + (!pair "x_param_sms_loop_average_count_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8443) + nil ) + (!pair "x_param_sms_max_ii_factor" + (!type already_seen 2) + (!fileloc "options.h" 8444) + nil ) + (!pair "x_param_sms_min_sc" + (!type already_seen 2) + (!fileloc "options.h" 8445) + nil ) + (!pair "x_param_sra_max_propagations" + (!type already_seen 2) + (!fileloc "options.h" 8446) + nil ) + (!pair "x_param_sra_max_scalarization_size_size" + (!type already_seen 2) + (!fileloc "options.h" 8447) + nil ) + (!pair "x_param_sra_max_scalarization_size_speed" + (!type already_seen 2) + (!fileloc "options.h" 8448) + nil ) + (!pair "x_param_ssa_name_def_chain_limit" + (!type already_seen 2) + (!fileloc "options.h" 8449) + nil ) + (!pair "x_param_ssp_buffer_size" + (!type already_seen 2) + (!fileloc "options.h" 8450) + nil ) + (!pair "x_param_stack_clash_protection_guard_size" + (!type already_seen 2) + (!fileloc "options.h" 8451) + nil ) + (!pair "x_param_stack_clash_protection_probe_interval" + (!type already_seen 2) + (!fileloc "options.h" 8452) + nil ) + (!pair "x_param_store_merging_allow_unaligned" + (!type already_seen 2) + (!fileloc "options.h" 8453) + nil ) + (!pair "x_param_store_merging_max_size" + (!type already_seen 2) + (!fileloc "options.h" 8454) + nil ) + (!pair "x_param_switch_conversion_branch_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8455) + nil ) + (!pair "x_param_tm_max_aggregate_size" + (!type already_seen 2) + (!fileloc "options.h" 8456) + nil ) + 
(!pair "x_param_tracer_dynamic_coverage_feedback" + (!type already_seen 2) + (!fileloc "options.h" 8457) + nil ) + (!pair "x_param_tracer_dynamic_coverage" + (!type already_seen 2) + (!fileloc "options.h" 8458) + nil ) + (!pair "x_param_tracer_max_code_growth" + (!type already_seen 2) + (!fileloc "options.h" 8459) + nil ) + (!pair "x_param_tracer_min_branch_probability_feedback" + (!type already_seen 2) + (!fileloc "options.h" 8460) + nil ) + (!pair "x_param_tracer_min_branch_probability" + (!type already_seen 2) + (!fileloc "options.h" 8461) + nil ) + (!pair "x_param_tracer_min_branch_ratio" + (!type already_seen 2) + (!fileloc "options.h" 8462) + nil ) + (!pair "x_param_tree_reassoc_width" + (!type already_seen 2) + (!fileloc "options.h" 8463) + nil ) + (!pair "x_param_uninit_control_dep_attempts" + (!type already_seen 2) + (!fileloc "options.h" 8464) + nil ) + (!pair "x_param_uninlined_function_insns" + (!type already_seen 2) + (!fileloc "options.h" 8465) + nil ) + (!pair "x_param_uninlined_function_time" + (!type already_seen 2) + (!fileloc "options.h" 8466) + nil ) + (!pair "x_param_uninlined_function_thunk_insns" + (!type already_seen 2) + (!fileloc "options.h" 8467) + nil ) + (!pair "x_param_uninlined_function_thunk_time" + (!type already_seen 2) + (!fileloc "options.h" 8468) + nil ) + (!pair "x_param_unlikely_bb_count_fraction" + (!type already_seen 2) + (!fileloc "options.h" 8469) + nil ) + (!pair "x_param_unroll_jam_max_unroll" + (!type already_seen 2) + (!fileloc "options.h" 8470) + nil ) + (!pair "x_param_unroll_jam_min_percent" + (!type already_seen 2) + (!fileloc "options.h" 8471) + nil ) + (!pair "x_param_use_after_scope_direct_emission_threshold" + (!type already_seen 2) + (!fileloc "options.h" 8472) + nil ) + (!pair "x_param_vect_epilogues_nomask" + (!type already_seen 2) + (!fileloc "options.h" 8473) + nil ) + (!pair "x_param_vect_induction_float" + (!type already_seen 2) + (!fileloc "options.h" 8474) + nil ) + (!pair 
"x_param_vect_inner_loop_cost_factor" + (!type already_seen 2) + (!fileloc "options.h" 8475) + nil ) + (!pair "x_param_vect_max_peeling_for_alignment" + (!type already_seen 2) + (!fileloc "options.h" 8476) + nil ) + (!pair "x_param_vect_max_version_for_alias_checks" + (!type already_seen 2) + (!fileloc "options.h" 8477) + nil ) + (!pair "x_param_vect_max_version_for_alignment_checks" + (!type already_seen 2) + (!fileloc "options.h" 8478) + nil ) + (!pair "x_param_vect_partial_vector_usage" + (!type already_seen 2) + (!fileloc "options.h" 8479) + nil ) + (!pair "x_flag_sched_stalled_insns" + (!type already_seen 2) + (!fileloc "options.h" 8480) + nil ) + (!pair "x_flag_sched_stalled_insns_dep" + (!type already_seen 2) + (!fileloc "options.h" 8481) + nil ) + (!pair "x_flag_tree_parallelize_loops" + (!type already_seen 2) + (!fileloc "options.h" 8482) + nil ) + (!pair "x_param_evrp_mode" + (!type already_seen 2) + (!fileloc "options.h" 8483) + nil ) + (!pair "x_param_ranger_debug" + (!type already_seen 2) + (!fileloc "options.h" 8484) + nil ) + (!pair "x_param_threader_debug" + (!type already_seen 2) + (!fileloc "options.h" 8485) + nil ) + (!pair "x_param_vrp1_mode" + (!type already_seen 2) + (!fileloc "options.h" 8486) + nil ) + (!pair "x_param_vrp2_mode" + (!type already_seen 2) + (!fileloc "options.h" 8487) + nil ) + (!pair "x_flag_excess_precision" + (!type already_seen 2) + (!fileloc "options.h" 8488) + nil ) + (!pair "x_flag_fp_contract_mode" + (!type already_seen 2) + (!fileloc "options.h" 8489) + nil ) + (!pair "x_flag_ira_algorithm" + (!type already_seen 2) + (!fileloc "options.h" 8490) + nil ) + (!pair "x_flag_ira_region" + (!type already_seen 2) + (!fileloc "options.h" 8491) + nil ) + (!pair "x_flag_live_patching" + (!type already_seen 2) + (!fileloc "options.h" 8492) + nil ) + (!pair "x_flag_reorder_blocks_algorithm" + (!type already_seen 2) + (!fileloc "options.h" 8493) + nil ) + (!pair "x_flag_simd_cost_model" + (!type already_seen 2) + (!fileloc 
"options.h" 8494) + nil ) + (!pair "x_flag_stack_reuse" + (!type already_seen 2) + (!fileloc "options.h" 8495) + nil ) + (!pair "x_flag_auto_var_init" + (!type already_seen 2) + (!fileloc "options.h" 8496) + nil ) + (!pair "x_flag_vect_cost_model" + (!type already_seen 2) + (!fileloc "options.h" 8497) + nil ) + (!pair "x_optimize" + (!type already_seen 8) + (!fileloc "options.h" 8498) + nil ) + (!pair "x_optimize_size" + (!type already_seen 8) + (!fileloc "options.h" 8499) + nil ) + (!pair "x_optimize_debug" + (!type already_seen 8) + (!fileloc "options.h" 8500) + nil ) + (!pair "x_optimize_fast" + (!type already_seen 8) + (!fileloc "options.h" 8501) + nil ) + (!pair "x_warn_inline" + (!type already_seen 2) + (!fileloc "options.h" 8502) + nil ) + (!pair "x_flag_aggressive_loop_optimizations" + (!type already_seen 2) + (!fileloc "options.h" 8503) + nil ) + (!pair "x_flag_align_functions" + (!type already_seen 2) + (!fileloc "options.h" 8504) + nil ) + (!pair "x_flag_align_jumps" + (!type already_seen 2) + (!fileloc "options.h" 8505) + nil ) + (!pair "x_flag_align_labels" + (!type already_seen 2) + (!fileloc "options.h" 8506) + nil ) + (!pair "x_flag_align_loops" + (!type already_seen 2) + (!fileloc "options.h" 8507) + nil ) + (!pair "x_flag_allocation_dce" + (!type already_seen 2) + (!fileloc "options.h" 8508) + nil ) + (!pair "x_flag_store_data_races" + (!type already_seen 2) + (!fileloc "options.h" 8509) + nil ) + (!pair "x_flag_associative_math" + (!type already_seen 2) + (!fileloc "options.h" 8510) + nil ) + (!pair "x_flag_asynchronous_unwind_tables" + (!type already_seen 2) + (!fileloc "options.h" 8511) + nil ) + (!pair "x_flag_auto_inc_dec" + (!type already_seen 2) + (!fileloc "options.h" 8512) + nil ) + (!pair "x_flag_bit_tests" + (!type already_seen 2) + (!fileloc "options.h" 8513) + nil ) + (!pair "x_flag_branch_on_count_reg" + (!type already_seen 2) + (!fileloc "options.h" 8514) + nil ) + (!pair "x_flag_branch_probabilities" + (!type already_seen 2) + 
(!fileloc "options.h" 8515) + nil ) + (!pair "x_flag_caller_saves" + (!type already_seen 2) + (!fileloc "options.h" 8516) + nil ) + (!pair "x_flag_code_hoisting" + (!type already_seen 2) + (!fileloc "options.h" 8517) + nil ) + (!pair "x_flag_combine_stack_adjustments" + (!type already_seen 2) + (!fileloc "options.h" 8518) + nil ) + (!pair "x_flag_compare_elim_after_reload" + (!type already_seen 2) + (!fileloc "options.h" 8519) + nil ) + (!pair "x_flag_conserve_stack" + (!type already_seen 2) + (!fileloc "options.h" 8520) + nil ) + (!pair "x_flag_cprop_registers" + (!type already_seen 2) + (!fileloc "options.h" 8521) + nil ) + (!pair "x_flag_crossjumping" + (!type already_seen 2) + (!fileloc "options.h" 8522) + nil ) + (!pair "x_flag_cse_follow_jumps" + (!type already_seen 2) + (!fileloc "options.h" 8523) + nil ) + (!pair "x_flag_cx_fortran_rules" + (!type already_seen 2) + (!fileloc "options.h" 8524) + nil ) + (!pair "x_flag_cx_limited_range" + (!type already_seen 2) + (!fileloc "options.h" 8525) + nil ) + (!pair "x_flag_dce" + (!type already_seen 2) + (!fileloc "options.h" 8526) + nil ) + (!pair "x_flag_defer_pop" + (!type already_seen 2) + (!fileloc "options.h" 8527) + nil ) + (!pair "x_flag_delayed_branch" + (!type already_seen 2) + (!fileloc "options.h" 8528) + nil ) + (!pair "x_flag_delete_dead_exceptions" + (!type already_seen 2) + (!fileloc "options.h" 8529) + nil ) + (!pair "x_flag_delete_null_pointer_checks" + (!type already_seen 2) + (!fileloc "options.h" 8530) + nil ) + (!pair "x_flag_devirtualize" + (!type already_seen 2) + (!fileloc "options.h" 8531) + nil ) + (!pair "x_flag_devirtualize_speculatively" + (!type already_seen 2) + (!fileloc "options.h" 8532) + nil ) + (!pair "x_flag_dse" + (!type already_seen 2) + (!fileloc "options.h" 8533) + nil ) + (!pair "x_flag_early_inlining" + (!type already_seen 2) + (!fileloc "options.h" 8534) + nil ) + (!pair "x_flag_exceptions" + (!type already_seen 2) + (!fileloc "options.h" 8535) + nil ) + (!pair 
"x_flag_expensive_optimizations" + (!type already_seen 2) + (!fileloc "options.h" 8536) + nil ) + (!pair "x_flag_finite_loops" + (!type already_seen 2) + (!fileloc "options.h" 8537) + nil ) + (!pair "x_flag_finite_math_only" + (!type already_seen 2) + (!fileloc "options.h" 8538) + nil ) + (!pair "x_flag_float_store" + (!type already_seen 2) + (!fileloc "options.h" 8539) + nil ) + (!pair "x_flag_fold_simple_inlines" + (!type already_seen 2) + (!fileloc "options.h" 8540) + nil ) + (!pair "x_flag_forward_propagate" + (!type already_seen 2) + (!fileloc "options.h" 8541) + nil ) + (!pair "x_flag_fp_int_builtin_inexact" + (!type already_seen 2) + (!fileloc "options.h" 8542) + nil ) + (!pair "x_flag_no_function_cse" + (!type already_seen 2) + (!fileloc "options.h" 8543) + nil ) + (!pair "x_flag_gcse" + (!type already_seen 2) + (!fileloc "options.h" 8544) + nil ) + (!pair "x_flag_gcse_after_reload" + (!type already_seen 2) + (!fileloc "options.h" 8545) + nil ) + (!pair "x_flag_gcse_las" + (!type already_seen 2) + (!fileloc "options.h" 8546) + nil ) + (!pair "x_flag_gcse_lm" + (!type already_seen 2) + (!fileloc "options.h" 8547) + nil ) + (!pair "x_flag_gcse_sm" + (!type already_seen 2) + (!fileloc "options.h" 8548) + nil ) + (!pair "x_flag_graphite" + (!type already_seen 2) + (!fileloc "options.h" 8549) + nil ) + (!pair "x_flag_graphite_identity" + (!type already_seen 2) + (!fileloc "options.h" 8550) + nil ) + (!pair "x_flag_guess_branch_prob" + (!type already_seen 2) + (!fileloc "options.h" 8551) + nil ) + (!pair "x_flag_harden_compares" + (!type already_seen 2) + (!fileloc "options.h" 8552) + nil ) + (!pair "x_flag_harden_conditional_branches" + (!type already_seen 2) + (!fileloc "options.h" 8553) + nil ) + (!pair "x_flag_hoist_adjacent_loads" + (!type already_seen 2) + (!fileloc "options.h" 8554) + nil ) + (!pair "x_flag_if_conversion" + (!type already_seen 2) + (!fileloc "options.h" 8555) + nil ) + (!pair "x_flag_if_conversion2" + (!type already_seen 2) + (!fileloc 
"options.h" 8556) + nil ) + (!pair "x_flag_indirect_inlining" + (!type already_seen 2) + (!fileloc "options.h" 8557) + nil ) + (!pair "x_flag_no_inline" + (!type already_seen 2) + (!fileloc "options.h" 8558) + nil ) + (!pair "x_flag_inline_atomics" + (!type already_seen 2) + (!fileloc "options.h" 8559) + nil ) + (!pair "x_flag_inline_functions" + (!type already_seen 2) + (!fileloc "options.h" 8560) + nil ) + (!pair "x_flag_inline_functions_called_once" + (!type already_seen 2) + (!fileloc "options.h" 8561) + nil ) + (!pair "x_flag_inline_small_functions" + (!type already_seen 2) + (!fileloc "options.h" 8562) + nil ) + (!pair "x_flag_ipa_bit_cp" + (!type already_seen 2) + (!fileloc "options.h" 8563) + nil ) + (!pair "x_flag_ipa_cp" + (!type already_seen 2) + (!fileloc "options.h" 8564) + nil ) + (!pair "x_flag_ipa_cp_clone" + (!type already_seen 2) + (!fileloc "options.h" 8565) + nil ) + (!pair "x_flag_ipa_icf" + (!type already_seen 2) + (!fileloc "options.h" 8566) + nil ) + (!pair "x_flag_ipa_icf_functions" + (!type already_seen 2) + (!fileloc "options.h" 8567) + nil ) + (!pair "x_flag_ipa_icf_variables" + (!type already_seen 2) + (!fileloc "options.h" 8568) + nil ) + (!pair "x_flag_ipa_modref" + (!type already_seen 2) + (!fileloc "options.h" 8569) + nil ) + (!pair "x_flag_ipa_profile" + (!type already_seen 2) + (!fileloc "options.h" 8570) + nil ) + (!pair "x_flag_ipa_pta" + (!type already_seen 2) + (!fileloc "options.h" 8571) + nil ) + (!pair "x_flag_ipa_pure_const" + (!type already_seen 2) + (!fileloc "options.h" 8572) + nil ) + (!pair "x_flag_ipa_ra" + (!type already_seen 2) + (!fileloc "options.h" 8573) + nil ) + (!pair "x_flag_ipa_reference" + (!type already_seen 2) + (!fileloc "options.h" 8574) + nil ) + (!pair "x_flag_ipa_reference_addressable" + (!type already_seen 2) + (!fileloc "options.h" 8575) + nil ) + (!pair "x_flag_ipa_sra" + (!type already_seen 2) + (!fileloc "options.h" 8576) + nil ) + (!pair "x_flag_ipa_stack_alignment" + (!type already_seen 2) + 
(!fileloc "options.h" 8577) + nil ) + (!pair "x_flag_ipa_strict_aliasing" + (!type already_seen 2) + (!fileloc "options.h" 8578) + nil ) + (!pair "x_flag_ipa_vrp" + (!type already_seen 2) + (!fileloc "options.h" 8579) + nil ) + (!pair "x_flag_ira_hoist_pressure" + (!type already_seen 2) + (!fileloc "options.h" 8580) + nil ) + (!pair "x_flag_ira_loop_pressure" + (!type already_seen 2) + (!fileloc "options.h" 8581) + nil ) + (!pair "x_flag_ira_share_save_slots" + (!type already_seen 2) + (!fileloc "options.h" 8582) + nil ) + (!pair "x_flag_ira_share_spill_slots" + (!type already_seen 2) + (!fileloc "options.h" 8583) + nil ) + (!pair "x_flag_isolate_erroneous_paths_attribute" + (!type already_seen 2) + (!fileloc "options.h" 8584) + nil ) + (!pair "x_flag_isolate_erroneous_paths_dereference" + (!type already_seen 2) + (!fileloc "options.h" 8585) + nil ) + (!pair "x_flag_ivopts" + (!type already_seen 2) + (!fileloc "options.h" 8586) + nil ) + (!pair "x_flag_jump_tables" + (!type already_seen 2) + (!fileloc "options.h" 8587) + nil ) + (!pair "x_flag_keep_gc_roots_live" + (!type already_seen 2) + (!fileloc "options.h" 8588) + nil ) + (!pair "x_flag_lifetime_dse" + (!type already_seen 2) + (!fileloc "options.h" 8589) + nil ) + (!pair "x_flag_limit_function_alignment" + (!type already_seen 2) + (!fileloc "options.h" 8590) + nil ) + (!pair "x_flag_live_range_shrinkage" + (!type already_seen 2) + (!fileloc "options.h" 8591) + nil ) + (!pair "x_flag_loop_interchange" + (!type already_seen 2) + (!fileloc "options.h" 8592) + nil ) + (!pair "x_flag_loop_nest_optimize" + (!type already_seen 2) + (!fileloc "options.h" 8593) + nil ) + (!pair "x_flag_loop_parallelize_all" + (!type already_seen 2) + (!fileloc "options.h" 8594) + nil ) + (!pair "x_flag_unroll_jam" + (!type already_seen 2) + (!fileloc "options.h" 8595) + nil ) + (!pair "x_flag_lra_remat" + (!type already_seen 2) + (!fileloc "options.h" 8596) + nil ) + (!pair "x_flag_errno_math" + (!type already_seen 2) + (!fileloc 
"options.h" 8597) + nil ) + (!pair "x_flag_modulo_sched" + (!type already_seen 2) + (!fileloc "options.h" 8598) + nil ) + (!pair "x_flag_modulo_sched_allow_regmoves" + (!type already_seen 2) + (!fileloc "options.h" 8599) + nil ) + (!pair "x_flag_move_loop_invariants" + (!type already_seen 2) + (!fileloc "options.h" 8600) + nil ) + (!pair "x_flag_move_loop_stores" + (!type already_seen 2) + (!fileloc "options.h" 8601) + nil ) + (!pair "x_flag_non_call_exceptions" + (!type already_seen 2) + (!fileloc "options.h" 8602) + nil ) + (!pair "x_flag_nothrow_opt" + (!type already_seen 2) + (!fileloc "options.h" 8603) + nil ) + (!pair "x_flag_omit_frame_pointer" + (!type already_seen 2) + (!fileloc "options.h" 8604) + nil ) + (!pair "x_flag_opt_info" + (!type already_seen 2) + (!fileloc "options.h" 8605) + nil ) + (!pair "x_flag_optimize_sibling_calls" + (!type already_seen 2) + (!fileloc "options.h" 8606) + nil ) + (!pair "x_flag_optimize_strlen" + (!type already_seen 2) + (!fileloc "options.h" 8607) + nil ) + (!pair "x_flag_pack_struct" + (!type already_seen 2) + (!fileloc "options.h" 8608) + nil ) + (!pair "x_flag_partial_inlining" + (!type already_seen 2) + (!fileloc "options.h" 8609) + nil ) + (!pair "x_flag_peel_loops" + (!type already_seen 2) + (!fileloc "options.h" 8610) + nil ) + (!pair "x_flag_no_peephole" + (!type already_seen 2) + (!fileloc "options.h" 8611) + nil ) + (!pair "x_flag_peephole2" + (!type already_seen 2) + (!fileloc "options.h" 8612) + nil ) + (!pair "x_flag_plt" + (!type already_seen 2) + (!fileloc "options.h" 8613) + nil ) + (!pair "x_flag_predictive_commoning" + (!type already_seen 2) + (!fileloc "options.h" 8614) + nil ) + (!pair "x_flag_prefetch_loop_arrays" + (!type already_seen 2) + (!fileloc "options.h" 8615) + nil ) + (!pair "x_flag_printf_return_value" + (!type already_seen 2) + (!fileloc "options.h" 8616) + nil ) + (!pair "x_flag_profile_partial_training" + (!type already_seen 2) + (!fileloc "options.h" 8617) + nil ) + (!pair 
"x_flag_profile_reorder_functions" + (!type already_seen 2) + (!fileloc "options.h" 8618) + nil ) + (!pair "x_flag_reciprocal_math" + (!type already_seen 2) + (!fileloc "options.h" 8619) + nil ) + (!pair "x_flag_ree" + (!type already_seen 2) + (!fileloc "options.h" 8620) + nil ) + (!pair "x_flag_pcc_struct_return" + (!type already_seen 2) + (!fileloc "options.h" 8621) + nil ) + (!pair "x_flag_rename_registers" + (!type already_seen 2) + (!fileloc "options.h" 8622) + nil ) + (!pair "x_flag_reorder_blocks" + (!type already_seen 2) + (!fileloc "options.h" 8623) + nil ) + (!pair "x_flag_reorder_blocks_and_partition" + (!type already_seen 2) + (!fileloc "options.h" 8624) + nil ) + (!pair "x_flag_reorder_functions" + (!type already_seen 2) + (!fileloc "options.h" 8625) + nil ) + (!pair "x_flag_rerun_cse_after_loop" + (!type already_seen 2) + (!fileloc "options.h" 8626) + nil ) + (!pair "x_flag_resched_modulo_sched" + (!type already_seen 2) + (!fileloc "options.h" 8627) + nil ) + (!pair "x_flag_rounding_math" + (!type already_seen 2) + (!fileloc "options.h" 8628) + nil ) + (!pair "x_flag_rtti" + (!type already_seen 2) + (!fileloc "options.h" 8629) + nil ) + (!pair "x_flag_save_optimization_record" + (!type already_seen 2) + (!fileloc "options.h" 8630) + nil ) + (!pair "x_flag_sched_critical_path_heuristic" + (!type already_seen 2) + (!fileloc "options.h" 8631) + nil ) + (!pair "x_flag_sched_dep_count_heuristic" + (!type already_seen 2) + (!fileloc "options.h" 8632) + nil ) + (!pair "x_flag_sched_group_heuristic" + (!type already_seen 2) + (!fileloc "options.h" 8633) + nil ) + (!pair "x_flag_schedule_interblock" + (!type already_seen 2) + (!fileloc "options.h" 8634) + nil ) + (!pair "x_flag_sched_last_insn_heuristic" + (!type already_seen 2) + (!fileloc "options.h" 8635) + nil ) + (!pair "x_flag_sched_pressure" + (!type already_seen 2) + (!fileloc "options.h" 8636) + nil ) + (!pair "x_flag_sched_rank_heuristic" + (!type already_seen 2) + (!fileloc "options.h" 8637) + nil ) 
+ (!pair "x_flag_schedule_speculative" + (!type already_seen 2) + (!fileloc "options.h" 8638) + nil ) + (!pair "x_flag_sched_spec_insn_heuristic" + (!type already_seen 2) + (!fileloc "options.h" 8639) + nil ) + (!pair "x_flag_schedule_speculative_load" + (!type already_seen 2) + (!fileloc "options.h" 8640) + nil ) + (!pair "x_flag_schedule_speculative_load_dangerous" + (!type already_seen 2) + (!fileloc "options.h" 8641) + nil ) + (!pair "x_flag_sched2_use_superblocks" + (!type already_seen 2) + (!fileloc "options.h" 8642) + nil ) + (!pair "x_flag_schedule_fusion" + (!type already_seen 2) + (!fileloc "options.h" 8643) + nil ) + (!pair "x_flag_schedule_insns" + (!type already_seen 2) + (!fileloc "options.h" 8644) + nil ) + (!pair "x_flag_schedule_insns_after_reload" + (!type already_seen 2) + (!fileloc "options.h" 8645) + nil ) + (!pair "x_flag_section_anchors" + (!type already_seen 2) + (!fileloc "options.h" 8646) + nil ) + (!pair "x_flag_sel_sched_pipelining" + (!type already_seen 2) + (!fileloc "options.h" 8647) + nil ) + (!pair "x_flag_sel_sched_pipelining_outer_loops" + (!type already_seen 2) + (!fileloc "options.h" 8648) + nil ) + (!pair "x_flag_sel_sched_reschedule_pipelined" + (!type already_seen 2) + (!fileloc "options.h" 8649) + nil ) + (!pair "x_flag_selective_scheduling" + (!type already_seen 2) + (!fileloc "options.h" 8650) + nil ) + (!pair "x_flag_selective_scheduling2" + (!type already_seen 2) + (!fileloc "options.h" 8651) + nil ) + (!pair "x_flag_semantic_interposition" + (!type already_seen 2) + (!fileloc "options.h" 8652) + nil ) + (!pair "x_flag_short_enums" + (!type already_seen 2) + (!fileloc "options.h" 8653) + nil ) + (!pair "x_flag_short_wchar" + (!type already_seen 2) + (!fileloc "options.h" 8654) + nil ) + (!pair "x_flag_shrink_wrap" + (!type already_seen 2) + (!fileloc "options.h" 8655) + nil ) + (!pair "x_flag_shrink_wrap_separate" + (!type already_seen 2) + (!fileloc "options.h" 8656) + nil ) + (!pair "x_flag_signaling_nans" + (!type 
already_seen 2) + (!fileloc "options.h" 8657) + nil ) + (!pair "x_flag_signed_zeros" + (!type already_seen 2) + (!fileloc "options.h" 8658) + nil ) + (!pair "x_flag_single_precision_constant" + (!type already_seen 2) + (!fileloc "options.h" 8659) + nil ) + (!pair "x_flag_split_ivs_in_unroller" + (!type already_seen 2) + (!fileloc "options.h" 8660) + nil ) + (!pair "x_flag_split_loops" + (!type already_seen 2) + (!fileloc "options.h" 8661) + nil ) + (!pair "x_flag_split_paths" + (!type already_seen 2) + (!fileloc "options.h" 8662) + nil ) + (!pair "x_flag_split_wide_types" + (!type already_seen 2) + (!fileloc "options.h" 8663) + nil ) + (!pair "x_flag_split_wide_types_early" + (!type already_seen 2) + (!fileloc "options.h" 8664) + nil ) + (!pair "x_flag_ssa_backprop" + (!type already_seen 2) + (!fileloc "options.h" 8665) + nil ) + (!pair "x_flag_ssa_phiopt" + (!type already_seen 2) + (!fileloc "options.h" 8666) + nil ) + (!pair "x_flag_stack_clash_protection" + (!type already_seen 2) + (!fileloc "options.h" 8667) + nil ) + (!pair "x_flag_stack_protect" + (!type already_seen 2) + (!fileloc "options.h" 8668) + nil ) + (!pair "x_flag_stdarg_opt" + (!type already_seen 2) + (!fileloc "options.h" 8669) + nil ) + (!pair "x_flag_store_merging" + (!type already_seen 2) + (!fileloc "options.h" 8670) + nil ) + (!pair "x_flag_strict_aliasing" + (!type already_seen 2) + (!fileloc "options.h" 8671) + nil ) + (!pair "x_flag_strict_enums" + (!type already_seen 2) + (!fileloc "options.h" 8672) + nil ) + (!pair "x_flag_strict_volatile_bitfields" + (!type already_seen 2) + (!fileloc "options.h" 8673) + nil ) + (!pair "x_flag_thread_jumps" + (!type already_seen 2) + (!fileloc "options.h" 8674) + nil ) + (!pair "x_flag_threadsafe_statics" + (!type already_seen 2) + (!fileloc "options.h" 8675) + nil ) + (!pair "x_flag_toplevel_reorder" + (!type already_seen 2) + (!fileloc "options.h" 8676) + nil ) + (!pair "x_flag_tracer" + (!type already_seen 2) + (!fileloc "options.h" 8677) + nil ) + 
(!pair "x_flag_trapping_math" + (!type already_seen 2) + (!fileloc "options.h" 8678) + nil ) + (!pair "x_flag_trapv" + (!type already_seen 2) + (!fileloc "options.h" 8679) + nil ) + (!pair "x_flag_tree_bit_ccp" + (!type already_seen 2) + (!fileloc "options.h" 8680) + nil ) + (!pair "x_flag_tree_builtin_call_dce" + (!type already_seen 2) + (!fileloc "options.h" 8681) + nil ) + (!pair "x_flag_tree_ccp" + (!type already_seen 2) + (!fileloc "options.h" 8682) + nil ) + (!pair "x_flag_tree_ch" + (!type already_seen 2) + (!fileloc "options.h" 8683) + nil ) + (!pair "x_flag_tree_coalesce_vars" + (!type already_seen 2) + (!fileloc "options.h" 8684) + nil ) + (!pair "x_flag_tree_copy_prop" + (!type already_seen 2) + (!fileloc "options.h" 8685) + nil ) + (!pair "x_flag_tree_cselim" + (!type already_seen 2) + (!fileloc "options.h" 8686) + nil ) + (!pair "x_flag_tree_dce" + (!type already_seen 2) + (!fileloc "options.h" 8687) + nil ) + (!pair "x_flag_tree_dom" + (!type already_seen 2) + (!fileloc "options.h" 8688) + nil ) + (!pair "x_flag_tree_dse" + (!type already_seen 2) + (!fileloc "options.h" 8689) + nil ) + (!pair "x_flag_tree_forwprop" + (!type already_seen 2) + (!fileloc "options.h" 8690) + nil ) + (!pair "x_flag_tree_fre" + (!type already_seen 2) + (!fileloc "options.h" 8691) + nil ) + (!pair "x_flag_tree_loop_distribute_patterns" + (!type already_seen 2) + (!fileloc "options.h" 8692) + nil ) + (!pair "x_flag_tree_loop_distribution" + (!type already_seen 2) + (!fileloc "options.h" 8693) + nil ) + (!pair "x_flag_tree_loop_if_convert" + (!type already_seen 2) + (!fileloc "options.h" 8694) + nil ) + (!pair "x_flag_tree_loop_im" + (!type already_seen 2) + (!fileloc "options.h" 8695) + nil ) + (!pair "x_flag_tree_loop_ivcanon" + (!type already_seen 2) + (!fileloc "options.h" 8696) + nil ) + (!pair "x_flag_tree_loop_optimize" + (!type already_seen 2) + (!fileloc "options.h" 8697) + nil ) + (!pair "x_flag_tree_loop_vectorize" + (!type already_seen 2) + (!fileloc "options.h" 
8698) + nil ) + (!pair "x_flag_tree_live_range_split" + (!type already_seen 2) + (!fileloc "options.h" 8699) + nil ) + (!pair "x_flag_tree_partial_pre" + (!type already_seen 2) + (!fileloc "options.h" 8700) + nil ) + (!pair "x_flag_tree_phiprop" + (!type already_seen 2) + (!fileloc "options.h" 8701) + nil ) + (!pair "x_flag_tree_pre" + (!type already_seen 2) + (!fileloc "options.h" 8702) + nil ) + (!pair "x_flag_tree_pta" + (!type already_seen 2) + (!fileloc "options.h" 8703) + nil ) + (!pair "x_flag_tree_reassoc" + (!type already_seen 2) + (!fileloc "options.h" 8704) + nil ) + (!pair "x_flag_tree_scev_cprop" + (!type already_seen 2) + (!fileloc "options.h" 8705) + nil ) + (!pair "x_flag_tree_sink" + (!type already_seen 2) + (!fileloc "options.h" 8706) + nil ) + (!pair "x_flag_tree_slp_vectorize" + (!type already_seen 2) + (!fileloc "options.h" 8707) + nil ) + (!pair "x_flag_tree_slsr" + (!type already_seen 2) + (!fileloc "options.h" 8708) + nil ) + (!pair "x_flag_tree_sra" + (!type already_seen 2) + (!fileloc "options.h" 8709) + nil ) + (!pair "x_flag_tree_switch_conversion" + (!type already_seen 2) + (!fileloc "options.h" 8710) + nil ) + (!pair "x_flag_tree_tail_merge" + (!type already_seen 2) + (!fileloc "options.h" 8711) + nil ) + (!pair "x_flag_tree_ter" + (!type already_seen 2) + (!fileloc "options.h" 8712) + nil ) + (!pair "x_flag_tree_vectorize" + (!type already_seen 2) + (!fileloc "options.h" 8713) + nil ) + (!pair "x_flag_tree_vrp" + (!type already_seen 2) + (!fileloc "options.h" 8714) + nil ) + (!pair "x_flag_unconstrained_commons" + (!type already_seen 2) + (!fileloc "options.h" 8715) + nil ) + (!pair "x_flag_unroll_all_loops" + (!type already_seen 2) + (!fileloc "options.h" 8716) + nil ) + (!pair "x_flag_cunroll_grow_size" + (!type already_seen 2) + (!fileloc "options.h" 8717) + nil ) + (!pair "x_flag_unroll_loops" + (!type already_seen 2) + (!fileloc "options.h" 8718) + nil ) + (!pair "x_flag_unsafe_math_optimizations" + (!type already_seen 2) + 
(!fileloc "options.h" 8719) + nil ) + (!pair "x_flag_unswitch_loops" + (!type already_seen 2) + (!fileloc "options.h" 8720) + nil ) + (!pair "x_flag_unwind_tables" + (!type already_seen 2) + (!fileloc "options.h" 8721) + nil ) + (!pair "x_flag_var_tracking" + (!type already_seen 2) + (!fileloc "options.h" 8722) + nil ) + (!pair "x_flag_var_tracking_assignments" + (!type already_seen 2) + (!fileloc "options.h" 8723) + nil ) + (!pair "x_flag_var_tracking_assignments_toggle" + (!type already_seen 2) + (!fileloc "options.h" 8724) + nil ) + (!pair "x_flag_var_tracking_uninit" + (!type already_seen 2) + (!fileloc "options.h" 8725) + nil ) + (!pair "x_flag_variable_expansion_in_unroller" + (!type already_seen 2) + (!fileloc "options.h" 8726) + nil ) + (!pair "x_flag_version_loops_for_strides" + (!type already_seen 2) + (!fileloc "options.h" 8727) + nil ) + (!pair "x_flag_value_profile_transformations" + (!type already_seen 2) + (!fileloc "options.h" 8728) + nil ) + (!pair "x_flag_web" + (!type already_seen 2) + (!fileloc "options.h" 8729) + nil ) + (!pair "x_flag_wrapv" + (!type already_seen 2) + (!fileloc "options.h" 8730) + nil ) + (!pair "x_flag_wrapv_pointer" + (!type already_seen 2) + (!fileloc "options.h" 8731) + nil ) + (!pair "x_debug_nonbind_markers_p" + (!type already_seen 2) + (!fileloc "options.h" 8732) + nil ) + (!pair "explicit_mask" + (!type array 575 nil gc_used "8" + (!type already_seen 2) + ) + (!fileloc "options.h" 8734) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 2011) + nil ) + (!pair "optabs" + (!type already_seen 3) + (!srcfileloc "tree-core.h" 2015) + (!options + (!option atomic string "") + ) + ) + (!pair "base_optabs" + (!type pointer 576 nil gc_unused + (!type struct 577 + (!type already_seen 576) + gc_unused "target_optabs"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "tree-core.h" 2019) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2084) + (!options + 
(!option tag string "TS_OPTIMIZATION") + ) + ) + (!pair "target_option" + (!type struct 578 nil gc_used "tree_target_option" + (!srcfileloc "tree-core.h" 2036) + (!fields 3 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "tree-core.h" 2029) + nil ) + (!pair "globals" + (!type pointer 579 nil gc_used + (!type struct 580 + (!type already_seen 579) + gc_pointed_to "target_globals" + (!srcfileloc "target-globals.h" 64) + (!fields 17 + (!pair "flag_state" + (!type pointer 581 nil gc_unused + (!type struct 582 + (!type already_seen 581) + gc_unused "target_flag_state"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 47) + (!options + (!option skip string "") + ) + ) + (!pair "regs" + (!type pointer 583 nil gc_unused + (!type struct 584 + (!type already_seen 583) + gc_unused "target_regs"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 48) + (!options + (!option skip string "") + ) + ) + (!pair "rtl" + (!type pointer 585 nil gc_used + (!type struct 586 + (!type already_seen 585) + gc_pointed_to "target_rtl" + (!srcfileloc "rtl.h" 3920) + (!fields 8 + (!pair "x_global_rtl" + (!type array 587 nil gc_used "GR_MAX" + (!type already_seen 99) + ) + (!srcfileloc "rtl.h" 3892) + nil ) + (!pair "x_pic_offset_table_rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 3895) + nil ) + (!pair "x_return_address_pointer_rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 3900) + nil ) + (!pair "x_initial_regno_reg_rtx" + (!type array 588 nil gc_used "FIRST_PSEUDO_REGISTER" + (!type already_seen 99) + ) + (!srcfileloc "rtl.h" 3906) + nil ) + (!pair "x_top_of_stack" + (!type array 589 nil gc_used "MAX_MACHINE_MODE" + (!type already_seen 99) + ) + (!srcfileloc "rtl.h" 3909) + nil ) + (!pair "x_static_reg_base_value" + (!type array 590 nil gc_used "FIRST_PSEUDO_REGISTER" + (!type already_seen 99) + ) + (!srcfileloc "rtl.h" 3913) + nil ) + (!pair "x_mode_mem_attrs" + (!type array 591 nil gc_used "(int) MAX_MACHINE_MODE" + (!type 
already_seen 227) + ) + (!srcfileloc "rtl.h" 3916) + nil ) + (!pair "target_specific_initialized" + (!type already_seen 2) + (!srcfileloc "rtl.h" 3919) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "target-globals.h" 49) + nil ) + (!pair "recog" + (!type pointer 592 nil gc_unused + (!type struct 593 + (!type already_seen 592) + gc_unused "target_recog"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 50) + (!options + (!option skip string "") + ) + ) + (!pair "hard_regs" + (!type pointer 594 nil gc_unused + (!type struct 595 + (!type already_seen 594) + gc_unused "target_hard_regs"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 51) + (!options + (!option skip string "") + ) + ) + (!pair "function_abi_info" + (!type pointer 596 nil gc_unused + (!type struct 597 + (!type already_seen 596) + gc_unused "target_function_abi_info"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 52) + (!options + (!option skip string "") + ) + ) + (!pair "reload" + (!type pointer 598 nil gc_unused + (!type struct 599 + (!type already_seen 598) + gc_unused "target_reload"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 53) + (!options + (!option skip string "") + ) + ) + (!pair "expmed" + (!type pointer 600 nil gc_unused + (!type struct 601 + (!type already_seen 600) + gc_unused "target_expmed"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 54) + (!options + (!option skip string "") + ) + ) + (!pair "optabs" + (!type already_seen 576) + (!srcfileloc "target-globals.h" 55) + (!options + (!option skip string "") + ) + ) + (!pair "libfuncs" + (!type pointer 602 nil gc_used + (!type struct 603 + (!type already_seen 602) + gc_pointed_to "target_libfuncs" + (!srcfileloc "libfuncs.h" 62) + (!fields 2 + (!pair "x_libfunc_table" + (!type array 604 nil gc_used "LTI_MAX" + (!type already_seen 99) + ) + (!srcfileloc "libfuncs.h" 58) + nil ) + (!pair "x_libfunc_hash" 
+ (!type pointer 605 nil gc_used + (!type user_struct 606 + (!type already_seen 605) + gc_pointed_to "hash_table" + (!srcfileloc "libfuncs.h" 61) + (!fields 1 + (!pair "libfunc_hasher" + (!type struct 607 nil gc_used "libfunc_hasher" + (!srcfileloc "libfuncs.h" 61) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "libfuncs.h" 61) + nil ) + ) + ) + ) + (!srcfileloc "libfuncs.h" 61) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "target-globals.h" 56) + nil ) + (!pair "cfgloop" + (!type pointer 608 nil gc_unused + (!type struct 609 + (!type already_seen 608) + gc_unused "target_cfgloop"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 57) + (!options + (!option skip string "") + ) + ) + (!pair "ira" + (!type pointer 610 nil gc_unused + (!type struct 611 + (!type already_seen 610) + gc_unused "target_ira"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 58) + (!options + (!option skip string "") + ) + ) + (!pair "ira_int" + (!type pointer 612 nil gc_unused + (!type struct 613 + (!type already_seen 612) + gc_unused "target_ira_int"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 59) + (!options + (!option skip string "") + ) + ) + (!pair "builtins" + (!type pointer 614 nil gc_unused + (!type struct 615 + (!type already_seen 614) + gc_unused "target_builtins"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 60) + (!options + (!option skip string "") + ) + ) + (!pair "gcse" + (!type pointer 616 nil gc_unused + (!type struct 617 + (!type already_seen 616) + gc_unused "target_gcse"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 61) + (!options + (!option skip string "") + ) + ) + (!pair "bb_reorder" + (!type pointer 618 nil gc_unused + (!type struct 619 + (!type already_seen 618) + gc_unused "target_bb_reorder"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 62) + (!options + (!option skip string "") + ) + ) + 
(!pair "lower_subreg" + (!type pointer 620 nil gc_unused + (!type struct 621 + (!type already_seen 620) + gc_unused "target_lower_subreg"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "target-globals.h" 63) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 2032) + nil ) + (!pair "opts" + (!type pointer 622 nil gc_used + (!type struct 623 + (!type already_seen 622) + gc_pointed_to "cl_target_option" + (!fileloc "options.h" 8798) + (!fields 57 + (!pair "x_ix86_isa_flags2_explicit" + (!type already_seen 2) + (!fileloc "options.h" 8740) + nil ) + (!pair "x_ix86_isa_flags_explicit" + (!type already_seen 2) + (!fileloc "options.h" 8741) + nil ) + (!pair "x_ix86_arch_string" + (!type already_seen 11) + (!fileloc "options.h" 8742) + nil ) + (!pair "x_ix86_recip_name" + (!type already_seen 11) + (!fileloc "options.h" 8743) + nil ) + (!pair "x_ix86_tune_ctrl_string" + (!type already_seen 11) + (!fileloc "options.h" 8744) + nil ) + (!pair "x_ix86_tune_memcpy_strategy" + (!type already_seen 11) + (!fileloc "options.h" 8745) + nil ) + (!pair "x_ix86_tune_memset_strategy" + (!type already_seen 11) + (!fileloc "options.h" 8746) + nil ) + (!pair "x_ix86_tune_string" + (!type already_seen 11) + (!fileloc "options.h" 8747) + nil ) + (!pair "x_ix86_stack_protector_guard_offset" + (!type already_seen 2) + (!fileloc "options.h" 8748) + nil ) + (!pair "x_ix86_stack_protector_guard_reg" + (!type already_seen 8) + (!fileloc "options.h" 8749) + nil ) + (!pair "x_ix86_unsafe_math_optimizations" + (!type already_seen 2) + (!fileloc "options.h" 8750) + nil ) + (!pair "x_target_flags" + (!type already_seen 2) + (!fileloc "options.h" 8751) + nil ) + (!pair "x_ix86_isa_flags" + (!type already_seen 2) + (!fileloc "options.h" 8752) + nil ) + (!pair "x_ix86_isa_flags2" + (!type already_seen 2) + (!fileloc "options.h" 8753) + nil ) + (!pair "x_ix86_fpmath" + (!type already_seen 2) + (!fileloc "options.h" 8754) + nil ) + (!pair 
"x_ix86_target_flags" + (!type already_seen 2) + (!fileloc "options.h" 8755) + nil ) + (!pair "x_ix86_move_max" + (!type already_seen 2) + (!fileloc "options.h" 8756) + nil ) + (!pair "x_prefer_vector_width_type" + (!type already_seen 2) + (!fileloc "options.h" 8757) + nil ) + (!pair "x_ix86_stack_protector_guard_offset_str" + (!type already_seen 11) + (!fileloc "options.h" 8758) + nil ) + (!pair "x_ix86_stack_protector_guard_reg_str" + (!type already_seen 11) + (!fileloc "options.h" 8759) + nil ) + (!pair "x_ix86_stack_protector_guard_symbol_str" + (!type already_seen 11) + (!fileloc "options.h" 8760) + nil ) + (!pair "x_ix86_store_max" + (!type already_seen 2) + (!fileloc "options.h" 8761) + nil ) + (!pair "x_ix86_asm_dialect" + (!type already_seen 2) + (!fileloc "options.h" 8762) + nil ) + (!pair "x_ix86_abi" + (!type already_seen 2) + (!fileloc "options.h" 8763) + nil ) + (!pair "x_ix86_stack_protector_guard" + (!type already_seen 2) + (!fileloc "options.h" 8764) + nil ) + (!pair "x_ix86_stringop_alg" + (!type already_seen 2) + (!fileloc "options.h" 8765) + nil ) + (!pair "x_ix86_tls_dialect" + (!type already_seen 2) + (!fileloc "options.h" 8766) + nil ) + (!pair "x_ix86_cmodel" + (!type already_seen 2) + (!fileloc "options.h" 8767) + nil ) + (!pair "x_ix86_excess_precision" + (!type already_seen 2) + (!fileloc "options.h" 8768) + nil ) + (!pair "x_ix86_veclibabi_type" + (!type already_seen 2) + (!fileloc "options.h" 8769) + nil ) + (!pair "x_ix86_pmode" + (!type already_seen 2) + (!fileloc "options.h" 8770) + nil ) + (!pair "x_ix86_branch_cost" + (!type already_seen 2) + (!fileloc "options.h" 8771) + nil ) + (!pair "x_ix86_dump_tunes" + (!type already_seen 2) + (!fileloc "options.h" 8772) + nil ) + (!pair "x_ix86_force_align_arg_pointer" + (!type already_seen 2) + (!fileloc "options.h" 8773) + nil ) + (!pair "x_ix86_force_drap" + (!type already_seen 2) + (!fileloc "options.h" 8774) + nil ) + (!pair "x_ix86_section_threshold" + (!type already_seen 2) + 
(!fileloc "options.h" 8775) + nil ) + (!pair "x_ix86_sse2avx" + (!type already_seen 2) + (!fileloc "options.h" 8776) + nil ) + (!pair "x_ix86_tune_no_default" + (!type already_seen 2) + (!fileloc "options.h" 8777) + nil ) + (!pair "x_recip_mask_explicit" + (!type already_seen 2) + (!fileloc "options.h" 8778) + nil ) + (!pair "x_ix86_incoming_stack_boundary_arg" + (!type already_seen 2) + (!fileloc "options.h" 8779) + nil ) + (!pair "x_ix86_preferred_stack_boundary_arg" + (!type already_seen 2) + (!fileloc "options.h" 8780) + nil ) + (!pair "x_ix86_regparm" + (!type already_seen 2) + (!fileloc "options.h" 8781) + nil ) + (!pair "x_recip_mask" + (!type already_seen 2) + (!fileloc "options.h" 8782) + nil ) + (!pair "arch" + (!type already_seen 8) + (!fileloc "options.h" 8783) + nil ) + (!pair "arch_specified" + (!type already_seen 8) + (!fileloc "options.h" 8784) + nil ) + (!pair "branch_cost" + (!type already_seen 8) + (!fileloc "options.h" 8785) + nil ) + (!pair "prefetch_sse" + (!type already_seen 8) + (!fileloc "options.h" 8786) + nil ) + (!pair "schedule" + (!type already_seen 8) + (!fileloc "options.h" 8787) + nil ) + (!pair "tune" + (!type already_seen 8) + (!fileloc "options.h" 8788) + nil ) + (!pair "tune_defaulted" + (!type already_seen 8) + (!fileloc "options.h" 8789) + nil ) + (!pair "x_flag_fentry" + (!type already_seen 2) + (!fileloc "options.h" 8790) + nil ) + (!pair "x_ix86_needed" + (!type already_seen 2) + (!fileloc "options.h" 8791) + nil ) + (!pair "explicit_mask" + (!type array 624 nil gc_used "1" + (!type already_seen 2) + ) + (!fileloc "options.h" 8793) + nil ) + (!pair "explicit_mask_target_flags" + (!type already_seen 2) + (!fileloc "options.h" 8794) + nil ) + (!pair "explicit_mask_ix86_isa_flags" + (!type already_seen 2) + (!fileloc "options.h" 8795) + nil ) + (!pair "explicit_mask_ix86_isa_flags2" + (!type already_seen 2) + (!fileloc "options.h" 8796) + nil ) + (!pair "explicit_mask_ix86_target_flags" + (!type already_seen 2) + (!fileloc 
"options.h" 8797) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "tree-core.h" 2035) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "tree-core.h" 2085) + (!options + (!option tag string "TS_TARGET_OPTION") + ) + ) + ) + (!options + (!option variable_size string "") + (!option desc string "tree_node_structure (&%h)") + (!option ptr_alias type + (!type lang_struct 625 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "ada/gcc-interface/ada-tree.h" 33) + (!fields 0 ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL") + (!option desc string "0") + ) + 1023 + (!homotypes 8 + (!type union 626 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "lto/lto-tree.h" 54) + (!fields 1 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "lto/lto-tree.h" 53) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "TS_LTO_GENERIC") + ) + ) + ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_TYPE_COMMON) ? ((union lang_tree_node *) %h.generic.type_common.next_variant) : CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? 
((union lang_tree_node *) %h.generic.common.chain) : NULL") + (!option desc string "lto_tree_node_structure (&%h)") + ) + 128 + (!type already_seen 625) + ) + + (!type union 627 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "jit/dummy-frontend.cc" 516) + (!fields 2 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "jit/dummy-frontend.cc" 514) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "0") + ) + ) + (!pair "identifier" + (!type lang_struct 628 nil gc_used "lang_identifier" + (!srcfileloc "d/d-tree.h" 344) + (!fields 0 ) + nil 248 + (!homotypes 7 + (!type struct 629 nil gc_used "lang_identifier" + (!srcfileloc "lto/lto-tree.h" 27) + (!fields 1 + (!pair "base" + (!type already_seen 51) + (!srcfileloc "lto/lto-tree.h" 26) + nil ) + ) + nil 128 + (!type already_seen 628) + nil ) + + (!type struct 630 nil gc_used "lang_identifier" + (!srcfileloc "jit/dummy-frontend.cc" 505) + (!fields 1 + (!pair "common" + (!type already_seen 51) + (!srcfileloc "jit/dummy-frontend.cc" 504) + nil ) + ) + nil 64 + (!type already_seen 628) + nil ) + + (!type struct 631 nil gc_used "lang_identifier" + (!srcfileloc "go/go-lang.cc" 66) + (!fields 1 + (!pair "common" + (!type already_seen 51) + (!srcfileloc "go/go-lang.cc" 65) + nil ) + ) + nil 32 + (!type already_seen 628) + nil ) + + (!type struct 632 nil gc_used "lang_identifier" + (!srcfileloc "fortran/f95-lang.cc" 48) + (!fields 1 + (!pair "common" + (!type already_seen 51) + (!srcfileloc "fortran/f95-lang.cc" 47) + nil ) + ) + nil 16 + (!type already_seen 628) + nil ) + + (!type struct 633 nil gc_used "lang_identifier" + (!srcfileloc "d/d-tree.h" 214) + (!fields 5 + (!pair "common" + (!type already_seen 51) + (!srcfileloc "d/d-tree.h" 203) + nil ) + (!pair "pretty_ident" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 206) + nil ) + (!pair "decl_tree" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 209) + nil ) + (!pair "dsymbol" + (!type already_seen 61) + (!srcfileloc 
"d/d-tree.h" 212) + (!options + (!option skip string "") + ) + ) + (!pair "daggregate" + (!type pointer 634 nil gc_unused + (!type struct 635 + (!type already_seen 634) + gc_unused "AggregateDeclaration" + (!srcfileloc "d/d-tree.h" 213) + (!fields 0 ) + nil 8 nil nil ) + ) + (!srcfileloc "d/d-tree.h" 213) + (!options + (!option skip string "") + ) + ) + ) + nil 8 + (!type already_seen 628) + nil ) + + (!type struct 636 nil gc_used "lang_identifier" + (!srcfileloc "cp/cp-tree.h" 682) + (!fields 2 + (!pair "c_common" + (!type struct 637 nil gc_used "c_common_identifier" + (!srcfileloc "c-family/c-common.h" 399) + (!fields 2 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "c-family/c-common.h" 397) + nil ) + (!pair "node" + (!type already_seen 12) + (!srcfileloc "c-family/c-common.h" 398) + nil ) + ) + nil 774 nil nil ) + (!srcfileloc "cp/cp-tree.h" 680) + nil ) + (!pair "bindings" + (!type already_seen 89) + (!srcfileloc "cp/cp-tree.h" 681) + nil ) + ) + nil 516 + (!type already_seen 628) + nil ) + + (!type struct 638 nil gc_used "lang_identifier" + (!srcfileloc "c/c-decl.cc" 226) + (!fields 4 + (!pair "common_id" + (!type already_seen 637) + (!srcfileloc "c/c-decl.cc" 222) + nil ) + (!pair "symbol_binding" + (!type pointer 639 nil gc_used + (!type struct 640 + (!type already_seen 639) + gc_pointed_to "c_binding" + (!srcfileloc "c/c-decl.cc" 211) + (!fields 11 + (!pair "u" + (!type union 641 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/c/c-decl.cc:197" + (!srcfileloc "c/c-decl.cc" 200) + (!fields 2 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 198) + (!options + (!option tag string "0") + ) + ) + (!pair "label" + (!type pointer 642 nil gc_used + (!type struct 643 + (!type already_seen 642) + gc_pointed_to "c_label_vars" + (!srcfileloc "c/c-decl.cc" 390) + (!fields 4 + (!pair "shadowed" + (!type already_seen 642) + (!srcfileloc "c/c-decl.cc" 378) + nil ) + (!pair "label_bindings" + (!type struct 644 
nil gc_used "c_spot_bindings" + (!srcfileloc "c/c-decl.cc" 357) + (!fields 4 + (!pair "scope" + (!type pointer 645 nil gc_used + (!type struct 646 + (!type already_seen 645) + gc_pointed_to "c_scope" + (!srcfileloc "c/c-decl.cc" 492) + (!fields 14 + (!pair "outer" + (!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 439) + nil ) + (!pair "outer_function" + (!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 442) + nil ) + (!pair "bindings" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 445) + nil ) + (!pair "blocks" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 449) + nil ) + (!pair "blocks_last" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 450) + nil ) + (!pair "depth" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 454) + nil ) + (!pair "parm_flag" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 458) + nil ) + (!pair "had_vla_unspec" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 462) + nil ) + (!pair "warned_forward_parm_decls" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 467) + nil ) + (!pair "function_body" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 473) + nil ) + (!pair "keep" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 476) + nil ) + (!pair "float_const_decimal64" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 479) + nil ) + (!pair "has_label_bindings" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 484) + nil ) + (!pair "has_jump_unsafe_decl" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 491) + nil ) + ) + (!options + (!option chain_next string "%h.outer") + ) + 258 nil nil ) + ) + (!srcfileloc "c/c-decl.cc" 342) + nil ) + (!pair "bindings_in_scope" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 346) + nil ) + (!pair "stmt_exprs" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 353) + nil ) + (!pair "left_stmt_expr" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 356) + nil ) + ) + nil 258 nil nil ) + (!srcfileloc "c/c-decl.cc" 380) + 
nil ) + (!pair "decls_in_scope" + (!type already_seen 84) + (!srcfileloc "c/c-decl.cc" 385) + nil ) + (!pair "gotos" + (!type pointer 647 nil gc_used + (!type user_struct 648 + (!type already_seen 647) + gc_pointed_to "vec" + (!srcfileloc "c/c-decl.cc" 389) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "c/c-decl.cc" 389) + nil ) + (!pair "c_goto_bindings_p" + (!type pointer 649 nil gc_used + (!type struct 650 + (!type already_seen 649) + gc_pointed_to "c_goto_bindings" + (!srcfileloc "c/c-decl.cc" 368) + (!fields 2 + (!pair "loc" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 365) + nil ) + (!pair "goto_bindings" + (!type already_seen 644) + (!srcfileloc "c/c-decl.cc" 367) + nil ) + ) + nil 258 nil nil ) + ) + (!srcfileloc "c/c-decl.cc" 389) + nil ) + ) + ) + ) + (!srcfileloc "c/c-decl.cc" 389) + nil ) + ) + nil 258 nil nil ) + ) + (!srcfileloc "c/c-decl.cc" 199) + (!options + (!option tag string "1") + ) + ) + ) + nil 258 nil ) + (!srcfileloc "c/c-decl.cc" 200) + (!options + (!option desc string "TREE_CODE (%0.decl) == LABEL_DECL") + ) + ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 201) + nil ) + (!pair "id" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 202) + nil ) + (!pair "prev" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 203) + nil ) + (!pair "shadowed" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 204) + nil ) + (!pair "depth" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 205) + nil ) + (!pair "invisible" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 206) + nil ) + (!pair "nested" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 207) + nil ) + (!pair "inner_comp" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 208) + nil ) + (!pair "in_struct" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 209) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 210) + nil ) + ) + (!options + (!option chain_next string 
"%h.prev") + ) + 258 nil nil ) + ) + (!srcfileloc "c/c-decl.cc" 223) + nil ) + (!pair "tag_binding" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 224) + nil ) + (!pair "label_binding" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 225) + nil ) + ) + nil 258 + (!type already_seen 628) + nil ) + ) + ) + (!srcfileloc "jit/dummy-frontend.cc" 515) + (!options + (!option tag string "1") + ) + ) + ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL") + (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE") + ) + 64 + (!type already_seen 625) + ) + + (!type union 651 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "go/go-lang.cc" 77) + (!fields 2 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "go/go-lang.cc" 75) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "0") + ) + ) + (!pair "identifier" + (!type already_seen 628) + (!srcfileloc "go/go-lang.cc" 76) + (!options + (!option tag string "1") + ) + ) + ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL") + (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE") + ) + 32 + (!type already_seen 625) + ) + + (!type union 652 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "fortran/f95-lang.cc" 58) + (!fields 2 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "fortran/f95-lang.cc" 56) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "0") + ) + ) + (!pair "identifier" + (!type already_seen 628) + (!srcfileloc "fortran/f95-lang.cc" 57) + (!options + (!option tag string "1") + ) + ) + ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? 
((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL") + (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE") + ) + 16 + (!type already_seen 625) + ) + + (!type union 653 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "d/d-tree.h" 346) + (!fields 3 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "d/d-tree.h" 343) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "TS_D_GENERIC") + ) + ) + (!pair "identifier" + (!type already_seen 628) + (!srcfileloc "d/d-tree.h" 344) + (!options + (!option tag string "TS_D_IDENTIFIER") + ) + ) + (!pair "frameinfo" + (!type struct 654 nil gc_used "tree_frame_info" + (!srcfileloc "d/d-tree.h" 345) + (!fields 2 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "d/d-tree.h" 180) + nil ) + (!pair "frame_type" + (!type already_seen 23) + (!srcfileloc "d/d-tree.h" 181) + nil ) + ) + nil 8 nil nil ) + (!srcfileloc "d/d-tree.h" 345) + (!options + (!option tag string "TS_D_FRAMEINFO") + ) + ) + ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? 
((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL") + (!option desc string "d_tree_node_structure (&%h)") + ) + 8 + (!type already_seen 625) + ) + + (!type union 655 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "cp/cp-tree.h" 1812) + (!fields 17 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "cp/cp-tree.h" 1788) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "TS_CP_GENERIC") + ) + ) + (!pair "tpi" + (!type struct 656 nil gc_used "template_parm_index" + (!srcfileloc "cp/cp-tree.h" 703) + (!fields 5 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 698) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 699) + nil ) + (!pair "level" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 700) + nil ) + (!pair "orig_level" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 701) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 702) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1789) + (!options + (!option tag string "TS_CP_TPI") + ) + ) + (!pair "ptrmem" + (!type struct 657 + (!type pointer 658 nil gc_unused + (!type already_seen 657) + ) + gc_used "ptrmem_cst" + (!srcfileloc "cp/cp-tree.h" 709) + (!fields 3 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 706) + nil ) + (!pair "member" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 707) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 708) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1790) + (!options + (!option tag string "TS_CP_PTRMEM") + ) + ) + (!pair "overload" + (!type struct 659 nil gc_used "tree_overload" + (!srcfileloc "cp/cp-tree.h" 815) + (!fields 2 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 813) + nil ) + (!pair "function" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 814) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc 
"cp/cp-tree.h" 1791) + (!options + (!option tag string "TS_CP_OVERLOAD") + ) + ) + (!pair "binding_vec" + (!type struct 660 nil gc_used "tree_binding_vec" + (!srcfileloc "cp/name-lookup.h" 149) + (!fields 3 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "cp/name-lookup.h" 146) + nil ) + (!pair "name" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 147) + nil ) + (!pair "vec" + (!type array 661 nil gc_used "1" + (!type struct 662 nil gc_used "binding_cluster" + (!srcfileloc "cp/name-lookup.h" 148) + (!fields 2 + (!pair "indices" + (!type array 663 nil gc_unused "BINDING_VECTOR_SLOTS_PER_CLUSTER" + (!type struct 664 nil gc_unused "binding_index" + (!srcfileloc "cp/name-lookup.h" 129) + (!fields 0 ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/name-lookup.h" 129) + (!options + (!option skip string "") + ) + ) + (!pair "slots" + (!type array 665 nil gc_used "BINDING_VECTOR_SLOTS_PER_CLUSTER" + (!type struct 666 nil gc_used "binding_slot" + (!srcfileloc "cp/name-lookup.h" 111) + (!fields 1 + (!pair "u" + (!type union 667 nil gc_used "binding_slot_lazy" + (!srcfileloc "cp/name-lookup.h" 80) + (!fields 1 + (!pair "binding" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 79) + (!options + (!option tag string "false") + ) + ) + ) + (!options + (!option desc string "%1.is_lazy ()") + ) + 516 nil ) + (!srcfileloc "cp/name-lookup.h" 80) + nil ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/name-lookup.h" 130) + nil ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/name-lookup.h" 148) + (!options + (!option length string "%h.base.u.dependence_info.base") + ) + ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1792) + (!options + (!option tag string "TS_CP_BINDING_VECTOR") + ) + ) + (!pair "baselink" + (!type struct 668 nil gc_used "tree_baselink" + (!srcfileloc "cp/cp-tree.h" 1083) + (!fields 4 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 1079) + nil ) + (!pair "binfo" + (!type already_seen 23) + (!srcfileloc 
"cp/cp-tree.h" 1080) + nil ) + (!pair "functions" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1081) + nil ) + (!pair "access_binfo" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1082) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1793) + (!options + (!option tag string "TS_CP_BASELINK") + ) + ) + (!pair "template_decl" + (!type struct 669 nil gc_used "tree_template_decl" + (!srcfileloc "cp/cp-tree.h" 1041) + (!fields 3 + (!pair "common" + (!type already_seen 53) + (!srcfileloc "cp/cp-tree.h" 1038) + nil ) + (!pair "arguments" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1039) + nil ) + (!pair "result" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1040) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1794) + (!options + (!option tag string "TS_CP_TEMPLATE_DECL") + ) + ) + (!pair "deferred_parse" + (!type struct 670 nil gc_used "tree_deferred_parse" + (!srcfileloc "cp/cp-tree.h" 1311) + (!fields 3 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "cp/cp-tree.h" 1308) + nil ) + (!pair "tokens" + (!type already_seen 70) + (!srcfileloc "cp/cp-tree.h" 1309) + nil ) + (!pair "instantiations" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 1310) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1795) + (!options + (!option tag string "TS_CP_DEFERRED_PARSE") + ) + ) + (!pair "deferred_noexcept" + (!type struct 671 nil gc_used "tree_deferred_noexcept" + (!srcfileloc "cp/cp-tree.h" 1332) + (!fields 3 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "cp/cp-tree.h" 1329) + nil ) + (!pair "pattern" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1330) + nil ) + (!pair "args" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1331) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1796) + (!options + (!option tag string "TS_CP_DEFERRED_NOEXCEPT") + ) + ) + (!pair "identifier" + (!type already_seen 628) + (!srcfileloc "cp/cp-tree.h" 1797) + (!options 
+ (!option tag string "TS_CP_IDENTIFIER") + ) + ) + (!pair "static_assertion" + (!type struct 672 nil gc_used "tree_static_assert" + (!srcfileloc "cp/cp-tree.h" 1355) + (!fields 4 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 1351) + nil ) + (!pair "condition" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1352) + nil ) + (!pair "message" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1353) + nil ) + (!pair "location" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1354) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1799) + (!options + (!option tag string "TS_CP_STATIC_ASSERT") + ) + ) + (!pair "argument_pack_select" + (!type struct 673 nil gc_used "tree_argument_pack_select" + (!srcfileloc "cp/cp-tree.h" 1361) + (!fields 3 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 1358) + nil ) + (!pair "argument_pack" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1359) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1360) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1801) + (!options + (!option tag string "TS_CP_ARGUMENT_PACK_SELECT") + ) + ) + (!pair "trait_expression" + (!type struct 674 nil gc_used "tree_trait_expr" + (!srcfileloc "cp/cp-tree.h" 1424) + (!fields 5 + (!pair "common" + (!type already_seen 32) + (!srcfileloc "cp/cp-tree.h" 1419) + nil ) + (!pair "type1" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1420) + nil ) + (!pair "type2" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1421) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1422) + nil ) + (!pair "kind" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1423) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1803) + (!options + (!option tag string "TS_CP_TRAIT_EXPR") + ) + ) + (!pair "lambda_expression" + (!type struct 675 nil gc_used "tree_lambda_expr" + (!srcfileloc "cp/cp-tree.h" 
1527) + (!fields 9 + (!pair "typed" + (!type already_seen 31) + (!srcfileloc "cp/cp-tree.h" 1518) + nil ) + (!pair "capture_list" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1519) + nil ) + (!pair "this_capture" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1520) + nil ) + (!pair "extra_scope" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1521) + nil ) + (!pair "regen_info" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1522) + nil ) + (!pair "pending_proxies" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 1523) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1524) + nil ) + (!pair "default_capture_mode" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1525) + nil ) + (!pair "discriminator" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1526) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1805) + (!options + (!option tag string "TS_CP_LAMBDA_EXPR") + ) + ) + (!pair "template_info" + (!type struct 676 nil gc_used "tree_template_info" + (!srcfileloc "cp/cp-tree.h" 1561) + (!fields 4 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "cp/cp-tree.h" 1557) + nil ) + (!pair "tmpl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1558) + nil ) + (!pair "args" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1559) + nil ) + (!pair "deferred_access_checks" + (!type already_seen 77) + (!srcfileloc "cp/cp-tree.h" 1560) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1807) + (!options + (!option tag string "TS_CP_TEMPLATE_INFO") + ) + ) + (!pair "constraint_info" + (!type struct 677 nil gc_used "tree_constraint_info" + (!srcfileloc "cp/cp-tree.h" 1578) + (!fields 4 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "cp/cp-tree.h" 1574) + nil ) + (!pair "template_reqs" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1575) + nil ) + (!pair "declarator_reqs" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1576) + 
nil ) + (!pair "associated_constr" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1577) + nil ) + ) + nil 516 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1809) + (!options + (!option tag string "TS_CP_CONSTRAINT_INFO") + ) + ) + (!pair "userdef_literal" + (!type struct 678 nil gc_used "tree_userdef_literal" + (!srcfileloc "c-family/c-common.h" 1319) + (!fields 5 + (!pair "base" + (!type already_seen 25) + (!srcfileloc "c-family/c-common.h" 1314) + nil ) + (!pair "suffix_id" + (!type already_seen 23) + (!srcfileloc "c-family/c-common.h" 1315) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "c-family/c-common.h" 1316) + nil ) + (!pair "num_string" + (!type already_seen 23) + (!srcfileloc "c-family/c-common.h" 1317) + nil ) + (!pair "overflow" + (!type already_seen 2) + (!srcfileloc "c-family/c-common.h" 1318) + nil ) + ) + nil 774 nil nil ) + (!srcfileloc "cp/cp-tree.h" 1811) + (!options + (!option tag string "TS_CP_USERDEF_LITERAL") + ) + ) + ) + (!options + (!option chain_next string "(union lang_tree_node *) c_tree_chain_next (&%h.generic)") + (!option desc string "cp_tree_node_structure (&%h)") + ) + 516 + (!type already_seen 625) + ) + + (!type union 679 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "c/c-decl.cc" 331) + (!fields 2 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc "c/c-decl.cc" 329) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "0") + ) + ) + (!pair "identifier" + (!type already_seen 628) + (!srcfileloc "c/c-decl.cc" 330) + (!options + (!option tag string "1") + ) + ) + ) + (!options + (!option chain_next string "(union lang_tree_node *) c_tree_chain_next (&%h.generic)") + (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE") + ) + 258 + (!type already_seen 625) + ) + + (!type union 680 nil gc_pointed_to "lang_tree_node" + (!srcfileloc "ada/gcc-interface/ada-tree.h" 33) + (!fields 1 + (!pair "generic" + (!type already_seen 22) + (!srcfileloc 
"ada/gcc-interface/ada-tree.h" 32) + (!options + (!option desc string "tree_node_structure (&%h)") + (!option tag string "0") + ) + ) + ) + (!options + (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL") + (!option desc string "0") + ) + 1 + (!type already_seen 625) + ) + ) + ) + ) + ) + 1023 nil ) + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL") + (!option tag string "false") + ) + ) + (!pair "next" + (!type already_seen 19) + (!srcfileloc "../libcpp/include/cpplib.h" 827) + (!options + (!option tag string "true") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 828) + (!options + (!option desc string "%1.kind == cmk_assert") + ) + ) + (!pair "line" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 831) + nil ) + (!pair "count" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 835) + nil ) + (!pair "paramc" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 838) + nil ) + (!pair "lazy" + (!type already_seen 8) + (!srcfileloc "../libcpp/include/cpplib.h" 841) + nil ) + (!pair "kind" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 844) + nil ) + (!pair "fun_like" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 847) + nil ) + (!pair "variadic" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 850) + nil ) + (!pair "syshdr" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 853) + nil ) + (!pair "used" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 856) + nil ) + (!pair "extra_tokens" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 861) + nil ) + (!pair "imported_p" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 864) + nil ) + (!pair "exp" + (!type union 681 nil gc_used "cpp_exp_u" + 
(!srcfileloc "../libcpp/include/cpplib.h" 877) + (!fields 2 + (!pair "tokens" + (!type array 682 nil gc_used "1" + (!type struct 683 + (!type pointer 684 nil gc_used + (!type already_seen 683) + ) + gc_pointed_to "cpp_token" + (!srcfileloc "../libcpp/include/cpplib.h" 274) + (!fields 4 + (!pair "src_loc" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 248) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 250) + nil ) + (!pair "flags" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 251) + nil ) + (!pair "val" + (!type union 685 nil gc_used "cpp_token_u" + (!srcfileloc "../libcpp/include/cpplib.h" 273) + (!fields 6 + (!pair "node" + (!type struct 686 nil gc_used "cpp_identifier" + (!srcfileloc "../libcpp/include/cpplib.h" 241) + (!fields 2 + (!pair "node" + (!type already_seen 13) + (!srcfileloc "../libcpp/include/cpplib.h" 234) + (!options + (!option nested_ptr nested + (!type already_seen 22) + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL") + ) + ) + (!pair "spelling" + (!type already_seen 13) + (!srcfileloc "../libcpp/include/cpplib.h" 240) + (!options + (!option nested_ptr nested + (!type already_seen 22) + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? 
CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 256) + (!options + (!option tag string "CPP_TOKEN_FLD_NODE") + ) + ) + (!pair "source" + (!type already_seen 684) + (!srcfileloc "../libcpp/include/cpplib.h" 259) + (!options + (!option tag string "CPP_TOKEN_FLD_SOURCE") + ) + ) + (!pair "str" + (!type struct 687 nil gc_used "cpp_string" + (!srcfileloc "../libcpp/include/cpplib.h" 183) + (!fields 2 + (!pair "len" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 181) + nil ) + (!pair "text" + (!type already_seen 11) + (!srcfileloc "../libcpp/include/cpplib.h" 182) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 262) + (!options + (!option tag string "CPP_TOKEN_FLD_STR") + ) + ) + (!pair "macro_arg" + (!type struct 688 nil gc_used "cpp_macro_arg" + (!srcfileloc "../libcpp/include/cpplib.h" 225) + (!fields 2 + (!pair "arg_no" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 218) + nil ) + (!pair "spelling" + (!type already_seen 13) + (!srcfileloc "../libcpp/include/cpplib.h" 224) + (!options + (!option nested_ptr nested + (!type already_seen 22) + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? 
CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 265) + (!options + (!option tag string "CPP_TOKEN_FLD_ARG_NO") + ) + ) + (!pair "token_no" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 269) + (!options + (!option tag string "CPP_TOKEN_FLD_TOKEN_NO") + ) + ) + (!pair "pragma" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 272) + (!options + (!option tag string "CPP_TOKEN_FLD_PRAGMA") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 273) + (!options + (!option desc string "cpp_token_val_index (&%1)") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "../libcpp/include/cpplib.h" 871) + (!options + (!option length string "%1.count") + (!option tag string "false") + ) + ) + (!pair "text" + (!type already_seen 11) + (!srcfileloc "../libcpp/include/cpplib.h" 876) + (!options + (!option tag string "true") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 877) + (!options + (!option desc string "%1.kind == cmk_traditional") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "../libcpp/include/cpplib.h" 941) + (!options + (!option tag string "NT_VOID") + ) + ) + (!pair "macro" + (!type already_seen 19) + (!srcfileloc "../libcpp/include/cpplib.h" 943) + (!options + (!option tag string "NT_USER_MACRO") + ) + ) + (!pair "builtin" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 945) + (!options + (!option tag string "NT_BUILTIN_MACRO") + ) + ) + (!pair "arg_index" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 947) + (!options + (!option tag string "NT_MACRO_ARG") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "../libcpp/include/cpplib.h" 968) + (!options + (!option desc string "%1.type") + ) + ) + ) + nil 1023 nil nil ) + + (!type already_seen 22) + + (!type struct 689 + (!type pointer 690 nil gc_unused + (!type already_seen 689) + ) + gc_used "line_map_macro" + 
(!srcfileloc "../libcpp/include/line-map.h" 740) + (!fields 4 + (!pair "n_tokens" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 465) + nil ) + (!pair "macro" + (!type already_seen 13) + (!srcfileloc "../libcpp/include/line-map.h" 474) + (!options + (!option nested_ptr nested + (!type already_seen 22) + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL") + ) + ) + (!pair "macro_locations" + (!type already_seen 3) + (!srcfileloc "../libcpp/include/line-map.h" 529) + (!options + (!option atomic string "") + ) + ) + (!pair "expansion" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 536) + nil ) + ) + (!options + (!option tag string "2") + ) + 1023 nil + (!type already_seen 5) + ) + + (!type struct 691 nil gc_used "maps_info_ordinary" + (!srcfileloc "../libcpp/include/line-map.h" 785) + (!fields 3 + (!pair "maps" + (!type already_seen 7) + (!srcfileloc "../libcpp/include/line-map.h" 725) + (!options + (!option length string "%h.used") + ) + ) + (!pair "allocated" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 728) + nil ) + (!pair "used" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 732) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 692 nil gc_used "maps_info_macro" + (!srcfileloc "../libcpp/include/line-map.h" 787) + (!fields 3 + (!pair "maps" + (!type already_seen 690) + (!srcfileloc "../libcpp/include/line-map.h" 740) + (!options + (!option length string "%h.used") + ) + ) + (!pair "allocated" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 743) + nil ) + (!pair "used" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 747) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 693 + (!type pointer 694 nil gc_unused + (!type already_seen 693) + ) + gc_used "location_adhoc_data" + (!srcfileloc "../libcpp/include/line-map.h" 758) + (!fields 3 + (!pair "locus" + (!type 
already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 755) + nil ) + (!pair "src_range" + (!type already_seen 1) + (!srcfileloc "../libcpp/include/line-map.h" 756) + nil ) + (!pair "data" + (!type already_seen 3) + (!srcfileloc "../libcpp/include/line-map.h" 757) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + + (!type already_seen 333) + + (!type struct 695 nil gc_used "location_adhoc_data_map" + (!srcfileloc "../libcpp/include/line-map.h" 777) + (!fields 4 + (!pair "htab" + (!type already_seen 332) + (!srcfileloc "../libcpp/include/line-map.h" 773) + (!options + (!option skip string "") + ) + ) + (!pair "curr_loc" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 774) + nil ) + (!pair "allocated" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 775) + nil ) + (!pair "data" + (!type already_seen 694) + (!srcfileloc "../libcpp/include/line-map.h" 776) + (!options + (!option length string "%h.allocated") + ) + ) + ) + nil 1023 nil nil ) + + (!type struct 696 + (!type pointer 697 nil gc_used + (!type already_seen 696) + ) + gc_pointed_to "line_maps" + (!srcfileloc "../libcpp/include/line-map.h" 826) + (!fields 15 + (!pair "info_ordinary" + (!type already_seen 691) + (!srcfileloc "../libcpp/include/line-map.h" 785) + nil ) + (!pair "info_macro" + (!type already_seen 692) + (!srcfileloc "../libcpp/include/line-map.h" 787) + nil ) + (!pair "depth" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 790) + nil ) + (!pair "trace_includes" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 793) + nil ) + (!pair "seen_line_directive" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 796) + nil ) + (!pair "highest_location" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 799) + nil ) + (!pair "highest_line" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 802) + nil ) + (!pair "max_column_hint" + 
(!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 806) + nil ) + (!pair "reallocator" + (!type already_seen 218) + (!srcfileloc "../libcpp/include/line-map.h" 809) + (!options + (!option callback string "") + ) + ) + (!pair "round_alloc_size" + (!type already_seen 218) + (!srcfileloc "../libcpp/include/line-map.h" 813) + (!options + (!option callback string "") + ) + ) + (!pair "location_adhoc_data_map" + (!type already_seen 695) + (!srcfileloc "../libcpp/include/line-map.h" 815) + nil ) + (!pair "builtin_location" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 819) + nil ) + (!pair "default_range_bits" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 822) + nil ) + (!pair "num_optimized_ranges" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 824) + nil ) + (!pair "num_unoptimized_ranges" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 825) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 698 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/../libcpp/include/line-map.h:1300"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 699 nil gc_unused "range_label"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 700 nil gc_unused "location_range"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 701 nil gc_unused "semi_embedded_vec"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 702 nil gc_unused "fixit_hint"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 703 nil gc_unused "diagnostic_path"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 704 nil gc_unused "rich_location"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 705 nil gc_unused "label_text"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 706 nil gc_unused "linemap_stats"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 707 nil gc_unused "cpp_reader"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 708 nil gc_unused 
"cpp_buffer"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 709 nil gc_unused "cpp_options"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 683) + + (!type already_seen 687) + + (!type already_seen 20) + + (!type struct 710 nil gc_unused "cpp_callbacks"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 711 nil gc_unused "cpp_dir"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 712 nil gc_unused "_cpp_file"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 688) + + (!type already_seen 686) + + (!type already_seen 685) + + (!type already_seen 21) + + (!type already_seen 681) + + (!type already_seen 18) + + (!type already_seen 15) + + (!type struct 713 nil gc_unused "cpp_string_location_reader"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 714 nil gc_unused "cpp_substring_ranges"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 715 nil gc_unused "cpp_num"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 716 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/../libcpp/include/cpplib.h:1358"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 717 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/../libcpp/include/cpplib.h:1369"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 718 nil gc_unused "cpp_converted_source"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 719 nil gc_unused "save_macro_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 720 nil gc_unused "cpp_decoded_char"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 721 nil gc_unused "cpp_char_column_policy"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 722 nil gc_unused "cpp_display_width_computation"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 723 nil gc_unused "char_span"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 724 nil gc_unused "file_cache_slot"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 725 nil gc_unused "file_cache"nil + 
(!fields 0 ) + nil 0 nil nil ) + + (!type struct 726 + (!type pointer 727 nil gc_used + (!type already_seen 726) + ) + gc_pointed_to "string_concat" + (!srcfileloc "input.h" 256) + (!fields 2 + (!pair "m_num" + (!type already_seen 2) + (!srcfileloc "input.h" 254) + nil ) + (!pair "m_locs" + (!type already_seen 3) + (!srcfileloc "input.h" 255) + (!options + (!option atomic string "") + ) + ) + ) + nil 1023 nil nil ) + + (!type struct 728 + (!type pointer 729 nil gc_used + (!type already_seen 728) + ) + gc_pointed_to "string_concat_db" + (!srcfileloc "input.h" 281) + (!fields 1 + (!pair "m_table" + (!type pointer 730 nil gc_used + (!type user_struct 731 + (!type already_seen 730) + gc_pointed_to "hash_map" + (!srcfileloc "input.h" 280) + (!fields 2 + (!pair "string_concat" + (!type already_seen 727) + (!srcfileloc "input.h" 280) + nil ) + (!pair "location_hash" + (!type undefined 732 nil gc_unused "location_hash" + (!srcfileloc "input.h" 280) + ) + (!srcfileloc "input.h" 280) + nil ) + ) + ) + ) + (!srcfileloc "input.h" 280) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 731) + + (!type already_seen 732) + + (!type already_seen 392) + + (!type already_seen 387) + + (!type struct 733 + (!type pointer 734 nil gc_unused + (!type already_seen 733) + ) + gc_unused "simple_bitmap_def"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 101) + + (!type already_seen 45) + + (!type already_seen 289) + + (!type struct 735 nil gc_unused "scalar_float_mode" + (!srcfileloc "coretypes.h" 67) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 736 nil gc_unused "complex_mode"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 737 nil gc_unused "fixed_size_mode"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 738 nil gc_unused "opt_mode" + (!srcfileloc "coretypes.h" 65) + (!fields 1 + (!pair "scalar_mode" + (!type already_seen 45) + (!srcfileloc "coretypes.h" 65) + nil ) + ) + ) + + (!type user_struct 739 nil gc_unused "opt_mode" + (!srcfileloc 
"coretypes.h" 66) + (!fields 1 + (!pair "scalar_int_mode" + (!type already_seen 289) + (!srcfileloc "coretypes.h" 66) + nil ) + ) + ) + + (!type user_struct 740 nil gc_unused "opt_mode" + (!srcfileloc "coretypes.h" 67) + (!fields 1 + (!pair "scalar_float_mode" + (!type already_seen 735) + (!srcfileloc "coretypes.h" 67) + nil ) + ) + ) + + (!type already_seen 44) + + (!type user_struct 741 nil gc_unused "pod_mode" + (!srcfileloc "coretypes.h" 70) + (!fields 1 + (!pair "scalar_int_mode" + (!type already_seen 289) + (!srcfileloc "coretypes.h" 70) + nil ) + ) + ) + + (!type user_struct 742 nil gc_unused "pod_mode" + (!srcfileloc "coretypes.h" 71) + (!fields 1 + (!pair "fixed_size_mode" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 71) + nil ) + ) + ) + + (!type struct 743 + (!type pointer 744 nil gc_unused + (!type already_seen 743) + ) + gc_pointed_to "rtx_expr_list" + (!srcfileloc "rtl.h" 468) + (!fields 0 ) + nil 1023 nil + (!type already_seen 101) + ) + + (!type struct 745 + (!type pointer 746 nil gc_used + (!type already_seen 745) + ) + gc_pointed_to "rtx_insn_list" + (!srcfileloc "rtl.h" 498) + (!fields 0 ) + nil 1023 nil + (!type already_seen 101) + ) + + (!type struct 747 nil gc_pointed_to "rtx_sequence" + (!srcfileloc "rtl.h" 526) + (!fields 0 ) + nil 1023 nil + (!type already_seen 101) + ) + + (!type already_seen 297) + + (!type struct 748 nil gc_pointed_to "rtx_debug_insn" + (!srcfileloc "rtl.h" 587) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type struct 749 nil gc_pointed_to "rtx_nonjump_insn" + (!srcfileloc "rtl.h" 598) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type struct 750 nil gc_pointed_to "rtx_jump_insn" + (!srcfileloc "rtl.h" 625) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type struct 751 nil gc_pointed_to "rtx_call_insn" + (!srcfileloc "rtl.h" 638) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type struct 752 nil gc_pointed_to "rtx_jump_table_data" 
+ (!srcfileloc "rtl.h" 664) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type struct 753 nil gc_pointed_to "rtx_barrier" + (!srcfileloc "rtl.h" 675) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type already_seen 366) + + (!type struct 754 + (!type pointer 755 nil gc_used + (!type already_seen 754) + ) + gc_pointed_to "rtx_note" + (!srcfileloc "emit-rtl.h" 128) + (!fields 0 ) + nil 1023 nil + (!type already_seen 297) + ) + + (!type already_seen 231) + + (!type struct 756 + (!type pointer 757 nil gc_unused + (!type already_seen 756) + ) + gc_unused "hwivec_def" + (!srcfileloc "rtl.h" 282) + (!fields 1 + (!pair "elem" + (!type array 758 nil gc_unused "1" + (!type already_seen 2) + ) + (!srcfileloc "rtl.h" 281) + nil ) + ) + (!options + (!option variable_size string "") + ) + 1023 nil nil ) + + (!type already_seen 282) + + (!type struct 759 nil gc_unused "gimple_stmt_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 760 nil gc_pointed_to "gcond" + (!srcfileloc "gimple.h" 885) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_OPS") + ) + 1023 nil + (!type struct 761 nil gc_pointed_to "gimple_statement_with_ops" + (!srcfileloc "gimple.h" 319) + (!fields 1 + (!pair "op" + (!type array 762 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "gimple.h" 318) + (!options + (!option length string "%h.num_ops") + ) + ) + ) + (!options + (!option tag string "GSS_WITH_OPS") + ) + 1023 nil + (!type struct 763 nil gc_pointed_to "gimple_statement_with_ops_base" + (!srcfileloc "gimple.h" 304) + (!fields 1 + (!pair "use_ops" + (!type already_seen 402) + (!srcfileloc "gimple.h" 303) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil + (!type already_seen 282) + ) + ) + ) + + (!type struct 764 nil gc_pointed_to "gdebug" + (!srcfileloc "gimple.h" 895) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_OPS") + ) + 1023 nil + (!type already_seen 761) + ) + + (!type struct 765 nil 
gc_pointed_to "ggoto" + (!srcfileloc "gimple.h" 905) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_OPS") + ) + 1023 nil + (!type already_seen 761) + ) + + (!type struct 766 nil gc_pointed_to "glabel" + (!srcfileloc "gimple.h" 915) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_OPS") + ) + 1023 nil + (!type already_seen 761) + ) + + (!type struct 767 nil gc_pointed_to "gswitch" + (!srcfileloc "gimple.h" 925) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_OPS") + ) + 1023 nil + (!type already_seen 761) + ) + + (!type struct 768 nil gc_pointed_to "gassign" + (!srcfileloc "gimple.h" 936) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_MEM_OPS") + ) + 1023 nil + (!type struct 769 nil gc_pointed_to "gimple_statement_with_memory_ops" + (!srcfileloc "gimple.h" 350) + (!fields 1 + (!pair "op" + (!type array 770 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "gimple.h" 349) + (!options + (!option length string "%h.num_ops") + ) + ) + ) + (!options + (!option tag string "GSS_WITH_MEM_OPS") + ) + 1023 nil + (!type struct 771 nil gc_pointed_to "gimple_statement_with_memory_ops_base" + (!srcfileloc "gimple.h" 334) + (!fields 2 + (!pair "vdef" + (!type already_seen 23) + (!srcfileloc "gimple.h" 332) + (!options + (!option skip string "") + ) + ) + (!pair "vuse" + (!type already_seen 23) + (!srcfileloc "gimple.h" 333) + (!options + (!option skip string "") + ) + ) + ) + (!options + (!option tag string "GSS_WITH_MEM_OPS_BASE") + ) + 1023 nil + (!type already_seen 763) + ) + ) + ) + + (!type struct 772 nil gc_pointed_to "gasm" + (!srcfileloc "gimple.h" 586) + (!fields 6 + (!pair "string" + (!type already_seen 11) + (!srcfileloc "gimple.h" 572) + nil ) + (!pair "ni" + (!type already_seen 8) + (!srcfileloc "gimple.h" 576) + nil ) + (!pair "no" + (!type already_seen 8) + (!srcfileloc "gimple.h" 577) + nil ) + (!pair "nc" + (!type already_seen 8) + (!srcfileloc "gimple.h" 578) + nil ) + (!pair "nl" + (!type already_seen 8) + 
(!srcfileloc "gimple.h" 579) + nil ) + (!pair "op" + (!type array 773 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "gimple.h" 585) + (!options + (!option length string "%h.num_ops") + ) + ) + ) + (!options + (!option tag string "GSS_ASM") + ) + 1023 nil + (!type already_seen 771) + ) + + (!type struct 774 + (!type pointer 775 nil gc_used + (!type already_seen 774) + ) + gc_pointed_to "gcall" + (!srcfileloc "gimple.h" 377) + (!fields 4 + (!pair "call_used" + (!type already_seen 385) + (!srcfileloc "gimple.h" 361) + nil ) + (!pair "call_clobbered" + (!type already_seen 385) + (!srcfileloc "gimple.h" 362) + nil ) + (!pair "u" + (!type union 776 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/gimple.h:365" + (!srcfileloc "gimple.h" 368) + (!fields 2 + (!pair "fntype" + (!type already_seen 23) + (!srcfileloc "gimple.h" 366) + (!options + (!option tag string "0") + ) + ) + (!pair "internal_fn" + (!type already_seen 2) + (!srcfileloc "gimple.h" 367) + (!options + (!option tag string "GF_CALL_INTERNAL") + ) + ) + ) + (!options + (!option desc string "%1.subcode & GF_CALL_INTERNAL") + ) + 1023 nil ) + (!srcfileloc "gimple.h" 368) + nil ) + (!pair "op" + (!type array 777 nil gc_used "1" + (!type already_seen 23) + ) + (!srcfileloc "gimple.h" 374) + (!options + (!option length string "%h.num_ops") + ) + ) + ) + (!options + (!option tag string "GSS_CALL") + ) + 1023 nil + (!type already_seen 771) + ) + + (!type struct 778 nil gc_pointed_to "gtransaction" + (!srcfileloc "gimple.h" 867) + (!fields 4 + (!pair "body" + (!type already_seen 281) + (!srcfileloc "gimple.h" 861) + nil ) + (!pair "label_norm" + (!type already_seen 23) + (!srcfileloc "gimple.h" 864) + nil ) + (!pair "label_uninst" + (!type already_seen 23) + (!srcfileloc "gimple.h" 865) + nil ) + (!pair "label_over" + (!type already_seen 23) + (!srcfileloc "gimple.h" 866) + nil ) + ) + (!options + (!option tag string "GSS_TRANSACTION") + ) + 1023 nil + (!type already_seen 
771) + ) + + (!type struct 779 nil gc_pointed_to "greturn" + (!srcfileloc "gimple.h" 946) + (!fields 0 ) + (!options + (!option tag string "GSS_WITH_MEM_OPS") + ) + 1023 nil + (!type already_seen 769) + ) + + (!type struct 780 nil gc_pointed_to "gbind" + (!srcfileloc "gimple.h" 413) + (!fields 3 + (!pair "vars" + (!type already_seen 23) + (!srcfileloc "gimple.h" 401) + nil ) + (!pair "block" + (!type already_seen 23) + (!srcfileloc "gimple.h" 409) + nil ) + (!pair "body" + (!type already_seen 281) + (!srcfileloc "gimple.h" 412) + nil ) + ) + (!options + (!option tag string "GSS_BIND") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 781 nil gc_pointed_to "gcatch" + (!srcfileloc "gimple.h" 428) + (!fields 2 + (!pair "types" + (!type already_seen 23) + (!srcfileloc "gimple.h" 424) + nil ) + (!pair "handler" + (!type already_seen 281) + (!srcfileloc "gimple.h" 427) + nil ) + ) + (!options + (!option tag string "GSS_CATCH") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 782 nil gc_pointed_to "geh_filter" + (!srcfileloc "gimple.h" 445) + (!fields 2 + (!pair "types" + (!type already_seen 23) + (!srcfileloc "gimple.h" 440) + nil ) + (!pair "failure" + (!type already_seen 281) + (!srcfileloc "gimple.h" 444) + nil ) + ) + (!options + (!option tag string "GSS_EH_FILTER") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 783 nil gc_pointed_to "geh_mnt" + (!srcfileloc "gimple.h" 467) + (!fields 1 + (!pair "fndecl" + (!type already_seen 23) + (!srcfileloc "gimple.h" 466) + nil ) + ) + (!options + (!option tag string "GSS_EH_MNT") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 784 nil gc_pointed_to "geh_else" + (!srcfileloc "gimple.h" 456) + (!fields 2 + (!pair "n_body" + (!type already_seen 281) + (!srcfileloc "gimple.h" 455) + nil ) + (!pair "e_body" + (!type already_seen 281) + (!srcfileloc "gimple.h" 455) + nil ) + ) + (!options + (!option tag string "GSS_EH_ELSE") + ) + 1023 nil + (!type already_seen 282) + ) + 
+ (!type struct 785 nil gc_pointed_to "gresx" + (!srcfileloc "gimple.h" 505) + (!fields 0 ) + (!options + (!option tag string "GSS_EH_CTRL") + ) + 1023 nil + (!type struct 786 nil gc_pointed_to "gimple_statement_eh_ctrl" + (!srcfileloc "gimple.h" 498) + (!fields 1 + (!pair "region" + (!type already_seen 2) + (!srcfileloc "gimple.h" 497) + nil ) + ) + (!options + (!option tag string "GSS_EH_CTRL") + ) + 1023 nil + (!type already_seen 282) + ) + ) + + (!type struct 787 nil gc_pointed_to "geh_dispatch" + (!srcfileloc "gimple.h" 512) + (!fields 0 ) + (!options + (!option tag string "GSS_EH_CTRL") + ) + 1023 nil + (!type already_seen 786) + ) + + (!type struct 788 nil gc_pointed_to "gphi" + (!srcfileloc "gimple.h" 485) + (!fields 4 + (!pair "capacity" + (!type already_seen 2) + (!srcfileloc "gimple.h" 477) + nil ) + (!pair "nargs" + (!type already_seen 2) + (!srcfileloc "gimple.h" 478) + nil ) + (!pair "result" + (!type already_seen 23) + (!srcfileloc "gimple.h" 481) + nil ) + (!pair "args" + (!type array 789 nil gc_used "1" + (!type struct 790 nil gc_used "phi_arg_d" + (!srcfileloc "tree-core.h" 1613) + (!fields 3 + (!pair "imm_use" + (!type already_seen 557) + (!srcfileloc "tree-core.h" 1610) + nil ) + (!pair "def" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 1611) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 1612) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "gimple.h" 484) + (!options + (!option length string "%h.nargs") + ) + ) + ) + (!options + (!option tag string "GSS_PHI") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 791 nil gc_pointed_to "gtry" + (!srcfileloc "gimple.h" 529) + (!fields 2 + (!pair "eval" + (!type already_seen 281) + (!srcfileloc "gimple.h" 524) + nil ) + (!pair "cleanup" + (!type already_seen 281) + (!srcfileloc "gimple.h" 528) + nil ) + ) + (!options + (!option tag string "GSS_TRY") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 792 nil gc_pointed_to 
"gomp_atomic_load" + (!srcfileloc "gimple.h" 797) + (!fields 2 + (!pair "rhs" + (!type already_seen 23) + (!srcfileloc "gimple.h" 796) + nil ) + (!pair "lhs" + (!type already_seen 23) + (!srcfileloc "gimple.h" 796) + nil ) + ) + (!options + (!option tag string "GSS_OMP_ATOMIC_LOAD") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 793 nil gc_pointed_to "gomp_atomic_store" + (!srcfileloc "gimple.h" 817) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_ATOMIC_STORE_LAYOUT") + ) + 1023 nil + (!type struct 794 nil gc_pointed_to "gimple_statement_omp_atomic_store_layout" + (!srcfileloc "gimple.h" 809) + (!fields 1 + (!pair "val" + (!type already_seen 23) + (!srcfileloc "gimple.h" 808) + nil ) + ) + (!options + (!option tag string "GSS_OMP_ATOMIC_STORE_LAYOUT") + ) + 1023 nil + (!type already_seen 282) + ) + ) + + (!type struct 795 nil gc_pointed_to "gomp_continue" + (!srcfileloc "gimple.h" 743) + (!fields 2 + (!pair "control_def" + (!type already_seen 23) + (!srcfileloc "gimple.h" 739) + nil ) + (!pair "control_use" + (!type already_seen 23) + (!srcfileloc "gimple.h" 742) + nil ) + ) + (!options + (!option tag string "GSS_OMP_CONTINUE") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type struct 796 nil gc_pointed_to "gomp_critical" + (!srcfileloc "gimple.h" 601) + (!fields 2 + (!pair "clauses" + (!type already_seen 23) + (!srcfileloc "gimple.h" 596) + nil ) + (!pair "name" + (!type already_seen 23) + (!srcfileloc "gimple.h" 600) + nil ) + ) + (!options + (!option tag string "GSS_OMP_CRITICAL") + ) + 1023 nil + (!type struct 797 nil gc_pointed_to "gimple_statement_omp" + (!srcfileloc "gimple.h" 389) + (!fields 1 + (!pair "body" + (!type already_seen 281) + (!srcfileloc "gimple.h" 388) + nil ) + ) + (!options + (!option tag string "GSS_OMP") + ) + 1023 nil + (!type already_seen 282) + ) + ) + + (!type struct 798 nil gc_pointed_to "gomp_ordered" + (!srcfileloc "gimple.h" 776) + (!fields 0 ) + (!options + (!option tag string 
"GSS_OMP_SINGLE_LAYOUT") + ) + 1023 nil + (!type struct 799 nil gc_pointed_to "gimple_statement_omp_single_layout" + (!srcfileloc "gimple.h" 755) + (!fields 1 + (!pair "clauses" + (!type already_seen 23) + (!srcfileloc "gimple.h" 754) + nil ) + ) + (!options + (!option tag string "GSS_OMP_SINGLE_LAYOUT") + ) + 1023 nil + (!type already_seen 797) + ) + ) + + (!type struct 800 nil gc_pointed_to "gomp_for" + (!srcfileloc "gimple.h" 641) + (!fields 4 + (!pair "clauses" + (!type already_seen 23) + (!srcfileloc "gimple.h" 629) + nil ) + (!pair "collapse" + (!type already_seen 2) + (!srcfileloc "gimple.h" 633) + nil ) + (!pair "iter" + (!type pointer 801 nil gc_unused + (!type struct 802 + (!type already_seen 801) + gc_used "gimple_omp_for_iter" + (!srcfileloc "gimple.h" 619) + (!fields 5 + (!pair "cond" + (!type already_seen 2) + (!srcfileloc "gimple.h" 606) + nil ) + (!pair "index" + (!type already_seen 23) + (!srcfileloc "gimple.h" 609) + nil ) + (!pair "initial" + (!type already_seen 23) + (!srcfileloc "gimple.h" 612) + nil ) + (!pair "final" + (!type already_seen 23) + (!srcfileloc "gimple.h" 615) + nil ) + (!pair "incr" + (!type already_seen 23) + (!srcfileloc "gimple.h" 618) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "gimple.h" 636) + (!options + (!option length string "%h.collapse") + ) + ) + (!pair "pre_body" + (!type already_seen 281) + (!srcfileloc "gimple.h" 640) + nil ) + ) + (!options + (!option tag string "GSS_OMP_FOR") + ) + 1023 nil + (!type already_seen 797) + ) + + (!type struct 803 nil gc_pointed_to "gomp_parallel" + (!srcfileloc "gimple.h" 680) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_PARALLEL_LAYOUT") + ) + 1023 nil + (!type struct 804 nil gc_pointed_to "gimple_statement_omp_taskreg" + (!srcfileloc "gimple.h" 672) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_PARALLEL_LAYOUT") + ) + 1023 nil + (!type struct 805 nil gc_pointed_to "gimple_statement_omp_parallel_layout" + (!srcfileloc "gimple.h" 662) + (!fields 3 + 
(!pair "clauses" + (!type already_seen 23) + (!srcfileloc "gimple.h" 653) + nil ) + (!pair "child_fn" + (!type already_seen 23) + (!srcfileloc "gimple.h" 657) + nil ) + (!pair "data_arg" + (!type already_seen 23) + (!srcfileloc "gimple.h" 661) + nil ) + ) + (!options + (!option tag string "GSS_OMP_PARALLEL_LAYOUT") + ) + 1023 nil + (!type already_seen 797) + ) + ) + ) + + (!type struct 806 nil gc_pointed_to "gomp_task" + (!srcfileloc "gimple.h" 705) + (!fields 3 + (!pair "copy_fn" + (!type already_seen 23) + (!srcfileloc "gimple.h" 699) + nil ) + (!pair "arg_size" + (!type already_seen 23) + (!srcfileloc "gimple.h" 703) + nil ) + (!pair "arg_align" + (!type already_seen 23) + (!srcfileloc "gimple.h" 704) + nil ) + ) + (!options + (!option tag string "GSS_OMP_TASK") + ) + 1023 nil + (!type already_seen 804) + ) + + (!type struct 807 nil gc_pointed_to "gomp_sections" + (!srcfileloc "gimple.h" 726) + (!fields 2 + (!pair "clauses" + (!type already_seen 23) + (!srcfileloc "gimple.h" 720) + nil ) + (!pair "control" + (!type already_seen 23) + (!srcfileloc "gimple.h" 725) + nil ) + ) + (!options + (!option tag string "GSS_OMP_SECTIONS") + ) + 1023 nil + (!type already_seen 797) + ) + + (!type struct 808 nil gc_pointed_to "gomp_single" + (!srcfileloc "gimple.h" 762) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_SINGLE_LAYOUT") + ) + 1023 nil + (!type already_seen 799) + ) + + (!type struct 809 nil gc_pointed_to "gomp_target" + (!srcfileloc "gimple.h" 688) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_PARALLEL_LAYOUT") + ) + 1023 nil + (!type already_seen 805) + ) + + (!type struct 810 nil gc_pointed_to "gomp_teams" + (!srcfileloc "gimple.h" 769) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_PARALLEL_LAYOUT") + ) + 1023 nil + (!type already_seen 804) + ) + + (!type already_seen 319) + + (!type struct 811 + (!type pointer 812 nil gc_used + (!type already_seen 811) + ) + gc_pointed_to "cgraph_node" + (!srcfileloc "cgraph.h" 1505) + (!fields 
48 + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1081) + nil ) + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1093) + nil ) + (!pair "callees" + (!type pointer 813 nil gc_used + (!type struct 814 + (!type already_seen 813) + gc_pointed_to "cgraph_edge" + (!srcfileloc "cgraph.h" 1976) + (!fields 22 + (!pair "count" + (!type already_seen 300) + (!srcfileloc "cgraph.h" 1887) + nil ) + (!pair "caller" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1888) + nil ) + (!pair "callee" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1889) + nil ) + (!pair "prev_caller" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1890) + nil ) + (!pair "next_caller" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1891) + nil ) + (!pair "prev_callee" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1892) + nil ) + (!pair "next_callee" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1893) + nil ) + (!pair "call_stmt" + (!type already_seen 775) + (!srcfileloc "cgraph.h" 1894) + nil ) + (!pair "indirect_info" + (!type pointer 815 nil gc_used + (!type struct 816 + (!type already_seen 815) + gc_pointed_to "cgraph_indirect_call_info" + (!srcfileloc "cgraph.h" 1897) + (!fields 13 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1650) + nil ) + (!pair "context" + (!type struct 817 nil gc_used "ipa_polymorphic_call_context" + (!srcfileloc "cgraph.h" 1641) + (!fields 10 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1551) + nil ) + (!pair "speculative_offset" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1552) + nil ) + (!pair "outer_type" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 1553) + nil ) + (!pair "speculative_outer_type" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 1554) + nil ) + (!pair "maybe_in_construction" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1556) + nil ) + (!pair "maybe_derived_type" + (!type already_seen 2) + 
(!srcfileloc "cgraph.h" 1558) + nil ) + (!pair "speculative_maybe_derived_type" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1561) + nil ) + (!pair "invalid" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1564) + nil ) + (!pair "dynamic" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1566) + nil ) + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1628) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "cgraph.h" 1652) + nil ) + (!pair "otr_token" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1654) + nil ) + (!pair "otr_type" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 1656) + nil ) + (!pair "param_index" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1658) + nil ) + (!pair "ecf_flags" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1660) + nil ) + (!pair "num_speculative_call_targets" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1663) + nil ) + (!pair "polymorphic" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1667) + nil ) + (!pair "agg_contents" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1670) + nil ) + (!pair "member_ptr" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1672) + nil ) + (!pair "by_ref" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1675) + nil ) + (!pair "guaranteed_unmodified" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1680) + nil ) + (!pair "vptr_changed" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1683) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 1897) + nil ) + (!pair "aux" + (!type already_seen 3) + (!srcfileloc "cgraph.h" 1898) + (!options + (!option skip string "") + ) + ) + (!pair "inline_failed" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1901) + nil ) + (!pair "lto_stmt_uid" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1904) + nil ) + (!pair "speculative_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1907) + nil ) + (!pair "indirect_inlining_edge" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 
1909) + nil ) + (!pair "indirect_unknown_callee" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1912) + nil ) + (!pair "call_stmt_cannot_inline_p" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1915) + nil ) + (!pair "can_throw_external" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1917) + nil ) + (!pair "speculative" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1935) + nil ) + (!pair "in_polymorphic_cdtor" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1938) + nil ) + (!pair "m_uid" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1952) + nil ) + (!pair "m_summary_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1955) + nil ) + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1971) + nil ) + ) + (!options + (!option for_user string "") + (!option chain_prev string "%h.prev_caller") + (!option chain_next string "%h.next_caller") + ) + 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 1386) + nil ) + (!pair "callers" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1387) + nil ) + (!pair "indirect_calls" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1390) + nil ) + (!pair "next_sibling_clone" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1391) + nil ) + (!pair "prev_sibling_clone" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1392) + nil ) + (!pair "clones" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1393) + nil ) + (!pair "clone_of" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1394) + nil ) + (!pair "call_site_hash" + (!type pointer 818 nil gc_used + (!type user_struct 819 + (!type already_seen 818) + gc_pointed_to "hash_table" + (!srcfileloc "cgraph.h" 1397) + (!fields 1 + (!pair "cgraph_edge_hasher" + (!type struct 820 nil gc_used "cgraph_edge_hasher" + (!srcfileloc "cgraph.h" 1397) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "cgraph.h" 1397) + nil ) + ) + ) + ) + (!srcfileloc "cgraph.h" 1397) + nil ) + (!pair "former_clone_of" + (!type already_seen 23) + 
(!srcfileloc "cgraph.h" 1399) + nil ) + (!pair "simdclone" + (!type pointer 821 nil gc_used + (!type struct 822 + (!type already_seen 821) + gc_pointed_to "cgraph_simd_clone" + (!srcfileloc "cgraph.h" 1403) + (!fields 11 + (!pair "simdlen" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 781) + nil ) + (!pair "nargs" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 785) + nil ) + (!pair "vecsize_int" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 788) + nil ) + (!pair "vecsize_float" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 791) + nil ) + (!pair "mask_mode" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 796) + nil ) + (!pair "vecsize_mangle" + (!type already_seen 8) + (!srcfileloc "cgraph.h" 801) + nil ) + (!pair "inbranch" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 805) + nil ) + (!pair "prev_clone" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 808) + nil ) + (!pair "next_clone" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 808) + nil ) + (!pair "origin" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 811) + nil ) + (!pair "args" + (!type array 823 nil gc_used "1" + (!type struct 824 nil gc_used "cgraph_simd_clone_arg" + (!srcfileloc "cgraph.h" 814) + (!fields 8 + (!pair "orig_arg" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 737) + nil ) + (!pair "orig_type" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 741) + nil ) + (!pair "vector_arg" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 749) + nil ) + (!pair "vector_type" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 752) + nil ) + (!pair "simd_array" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 761) + nil ) + (!pair "arg_type" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 765) + nil ) + (!pair "alignment" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 768) + nil ) + (!pair "linear_step" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 774) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 814) + (!options + 
(!option length string "%h.nargs") + ) + ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 1403) + nil ) + (!pair "simd_clones" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1405) + nil ) + (!pair "ipa_transforms_to_apply" + (!type user_struct 825 nil gc_unused "vec" + (!srcfileloc "cgraph.h" 1410) + (!fields 3 + (!pair "vl_ptr" + (!type undefined 826 nil gc_unused "vl_ptr" + (!srcfileloc "cgraph.h" 1410) + ) + (!srcfileloc "cgraph.h" 1410) + nil ) + (!pair "va_heap" + (!type undefined 827 nil gc_unused "va_heap" + (!srcfileloc "cgraph.h" 1410) + ) + (!srcfileloc "cgraph.h" 1410) + nil ) + (!pair "ipa_opt_pass" + (!type pointer 828 nil gc_unused + (!type struct 829 + (!type already_seen 828) + gc_unused "ipa_opt_pass_d" + (!srcfileloc "cgraph.h" 38) + (!fields 0 ) + nil 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 1410) + nil ) + ) + ) + (!srcfileloc "cgraph.h" 1410) + (!options + (!option skip string "") + ) + ) + (!pair "inlined_to" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 1414) + nil ) + (!pair "rtl" + (!type pointer 830 nil gc_used + (!type struct 831 + (!type already_seen 830) + gc_pointed_to "cgraph_rtl_info" + (!srcfileloc "rtl.h" 4558) + (!fields 2 + (!pair "preferred_incoming_stack_boundary" + (!type already_seen 2) + (!srcfileloc "rtl.h" 4553) + nil ) + (!pair "function_used_regs" + (!type already_seen 2) + (!srcfileloc "rtl.h" 4557) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "cgraph.h" 1416) + nil ) + (!pair "count" + (!type already_seen 300) + (!srcfileloc "cgraph.h" 1419) + nil ) + (!pair "count_materialization_scale" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1422) + nil ) + (!pair "profile_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1424) + nil ) + (!pair "unit_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1426) + nil ) + (!pair "tp_first_run" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1428) + nil ) + (!pair "thunk" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1431) + nil ) 
+ (!pair "used_as_abstract_origin" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1434) + nil ) + (!pair "lowered" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1436) + nil ) + (!pair "process" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1439) + nil ) + (!pair "frequency" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1442) + nil ) + (!pair "only_called_at_startup" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1444) + nil ) + (!pair "only_called_at_exit" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1446) + nil ) + (!pair "tm_clone" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1451) + nil ) + (!pair "dispatcher_function" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1453) + nil ) + (!pair "calls_comdat_local" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1456) + nil ) + (!pair "icf_merged" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1458) + nil ) + (!pair "nonfreeing_fn" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1462) + nil ) + (!pair "merged_comdat" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1464) + nil ) + (!pair "merged_extern_inline" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1466) + nil ) + (!pair "parallelized_function" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1468) + nil ) + (!pair "split_part" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1470) + nil ) + (!pair "indirect_call_target" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1472) + nil ) + (!pair "local" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1475) + nil ) + (!pair "versionable" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1477) + nil ) + (!pair "can_change_signature" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1480) + nil ) + (!pair "redefined_extern_inline" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1483) + nil ) + (!pair "tm_may_enter_irr" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1485) + nil ) + (!pair "ipcp_clone" + (!type already_seen 2) + (!srcfileloc 
"cgraph.h" 1487) + nil ) + (!pair "declare_variant_alt" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1490) + nil ) + (!pair "calls_declare_variant_alt" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1492) + nil ) + (!pair "m_uid" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1496) + nil ) + (!pair "m_summary_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1499) + nil ) + ) + (!options + (!option tag string "SYMTAB_FUNCTION") + ) + 1023 nil + (!type already_seen 319) + ) + + (!type struct 832 nil gc_pointed_to "varpool_node" + (!srcfileloc "cgraph.h" 2117) + (!fields 5 + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 1996) + nil ) + (!pair "output" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2095) + nil ) + (!pair "dynamically_initialized" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2099) + nil ) + (!pair "tls_model" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2101) + nil ) + (!pair "used_by_single_function" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2107) + nil ) + ) + (!options + (!option tag string "SYMTAB_VARIABLE") + ) + 1023 nil + (!type already_seen 319) + ) + + (!type already_seen 814) + + (!type already_seen 214) + + (!type struct 833 + (!type pointer 834 nil gc_unused + (!type already_seen 833) + ) + gc_unused "gcc_options" + (!srcfileloc "c-family/c-pragma.cc" 1051) + (!fields 0 ) + nil 774 nil nil ) + + (!type already_seen 623) + + (!type already_seen 574) + + (!type struct 835 nil gc_unused "cl_option"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 836 nil gc_unused "cl_decoded_option"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 837 nil gc_unused "cl_option_handlers"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 838 nil gc_unused "diagnostic_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 839 nil gc_unused "pretty_printer"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 840 nil gc_unused "diagnostic_event_id_t"nil + (!fields 0 ) + 
nil 0 nil nil ) + + (!type struct 841 nil gc_unused "bitmap_view"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 273) + + (!type already_seen 269) + + (!type struct 842 nil gc_unused "rtl_opt_pass"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 843 nil gc_unused "context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 844 nil gc_unused "std::pair" + (!srcfileloc "coretypes.h" 362) + (!fields 2 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "coretypes.h" 362) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "coretypes.h" 362) + nil ) + ) + ) + + (!type user_struct 845 nil gc_unused "std::pair" + (!srcfileloc "coretypes.h" 363) + (!fields 1 + (!pair "int" + (!type already_seen 373) + (!srcfileloc "coretypes.h" 363) + nil ) + ) + ) + + (!type already_seen 373) + + (!type struct 846 nil gc_unused "kv_pair"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 847 nil gc_unused "_dont_use_rtx_here_"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 848 nil gc_unused "_dont_use_rtvec_here_"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 849 nil gc_unused "_dont_use_rtx_insn_here_"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type union 850 nil gc_unused "_dont_use_tree_here_"nil + (!fields 0 ) + nil 0 nil ) + + (!type struct 851 nil gc_unused "cpp_reason_option_codes_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 852 nil gc_unused "stringop_algs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 853 nil gc_unused "processor_costs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 854 nil gc_unused "ix86_args"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 855 nil gc_unused "pta"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 425) + + (!type already_seen 426) + + (!type already_seen 428) + + (!type already_seen 423) + + (!type already_seen 421) + + (!type undefined 856 nil gc_unused "TARGET_UNIT" + (!srcfileloc "defaults.h" 1455) + ) + + (!type struct 857 + (!type 
pointer 858 nil gc_unused + (!type already_seen 857) + ) + gc_unused "splay_tree_node_s"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 859 + (!type pointer 860 nil gc_unused + (!type already_seen 859) + ) + gc_unused "splay_tree_s"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 861 nil gc_unused "bitmap_usage"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 389) + + (!type struct 862 nil gc_unused "bitmap_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 863 nil gc_unused "auto_bitmap"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 864 nil gc_unused "base_bitmap_view"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 498) + + (!type already_seen 497) + + (!type undefined 865 nil gc_unused "FIXED_WIDE_INT" + (!srcfileloc "wide-int.h" 323) + ) + + (!type user_struct 866 nil gc_unused "generic_wide_int>" + (!srcfileloc "wide-int.h" 327) + (!fields 1 + (!pair "fixed_wide_int_storage>" + (!srcfileloc "wide-int.h" 334) + (!fields 1 + (!pair "wide_int_ref_storageprecision>>" + (!srcfileloc "wide-int.h" 438) + (!fields 2 + (!pair "precision" + (!type undefined 875 nil gc_unused "precision" + (!srcfileloc "wide-int.h" 438) + ) + (!srcfileloc "wide-int.h" 438) + nil ) + (!pair "fixed_wide_int_storageprecision>>" + (!srcfileloc "wide-int.h" 459) + (!fields 2 + (!pair "precision" + (!type already_seen 875) + (!srcfileloc "wide-int.h" 459) + nil ) + (!pair "fixed_wide_int_storage" + (!srcfileloc "wide-int.h" 1366) + (!fields 1 + (!pair "trailing_wide_int_storage" + (!type already_seen 890) + (!srcfileloc "wide-int.h" 1366) + nil ) + ) + ) + + (!type user_struct 892 nil gc_unused "trailing_wide_ints" + (!srcfileloc "wide-int.h" 1408) + (!fields 0 ) + ) + + (!type struct 893 nil gc_unused "primitive_int_traits"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 894 nil gc_unused "hwi_with_prec"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 895 nil gc_unused "ints_for"nil + (!fields 0 ) + nil 0 nil nil ) + + 
(!type struct 896 nil gc_unused "never_used1"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 897 nil gc_unused "never_used2"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 898 + (!type pointer 899 nil gc_used + (!type already_seen 898) + ) + gc_pointed_to "coverage_data" + (!srcfileloc "coverage.cc" 66) + (!fields 6 + (!pair "next" + (!type already_seen 899) + (!srcfileloc "coverage.cc" 60) + nil ) + (!pair "ident" + (!type already_seen 2) + (!srcfileloc "coverage.cc" 61) + nil ) + (!pair "lineno_checksum" + (!type already_seen 2) + (!srcfileloc "coverage.cc" 62) + nil ) + (!pair "cfg_checksum" + (!type already_seen 2) + (!srcfileloc "coverage.cc" 63) + nil ) + (!pair "fn_decl" + (!type already_seen 23) + (!srcfileloc "coverage.cc" 64) + nil ) + (!pair "ctr_vars" + (!type array 900 nil gc_used "GCOV_COUNTERS" + (!type already_seen 23) + ) + (!srcfileloc "coverage.cc" 65) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + + (!type struct 901 nil gc_unused "counts_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 902 + (!type pointer 903 nil gc_unused + (!type already_seen 902) + ) + gc_unused "predefined_function_abi" + (!srcfileloc "emit-rtl.h" 75) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 904 nil gc_unused "addr_diff_vec_flags"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 228) + + (!type already_seen 239) + + (!type union 905 nil gc_unused "rtunion"nil + (!fields 0 ) + nil 0 nil ) + + (!type struct 906 nil gc_unused "reg_info" + (!srcfileloc "rtl.h" 230) + (!fields 4 + (!pair "regno" + (!type already_seen 2) + (!srcfileloc "rtl.h" 222) + nil ) + (!pair "nregs" + (!type already_seen 2) + (!srcfileloc "rtl.h" 225) + nil ) + (!pair "unused" + (!type already_seen 2) + (!srcfileloc "rtl.h" 226) + nil ) + (!pair "attrs" + (!type already_seen 238) + (!srcfileloc "rtl.h" 229) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 210) + + (!type already_seen 212) + + (!type 
already_seen 208) + + (!type already_seen 221) + + (!type already_seen 79) + + (!type user_struct 907 nil gc_unused "trailing_wide_ints" + (!srcfileloc "rtl.h" 291) + (!fields 1 + (!pair "NUM_POLY_INT_COEFFS" + (!type undefined 908 nil gc_unused "NUM_POLY_INT_COEFFS" + (!srcfileloc "rtl.h" 291) + ) + (!srcfileloc "rtl.h" 291) + nil ) + ) + ) + + (!type already_seen 908) + + (!type struct 909 nil gc_unused "const_poly_int_def" + (!srcfileloc "rtl.h" 292) + (!fields 1 + (!pair "coeffs" + (!type already_seen 907) + (!srcfileloc "rtl.h" 291) + nil ) + ) + (!options + (!option variable_size string "") + ) + 1023 nil nil ) + + (!type already_seen 103) + + (!type already_seen 102) + + (!type already_seen 39) + + (!type already_seen 43) + + (!type union 910 nil gc_used "u" + (!srcfileloc "ggc-tests.cc" 129) + (!fields 2 + (!pair "u_test_struct" + (!type pointer 911 nil gc_used + (!type struct 912 + (!type already_seen 911) + gc_pointed_to "test_struct" + (!srcfileloc "ggc-tests.cc" 42) + (!fields 1 + (!pair "other" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 39) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "ggc-tests.cc" 127) + (!options + (!option tag string "WHICH_FIELD_USE_TEST_STRUCT") + ) + ) + (!pair "u_test_other" + (!type pointer 913 nil gc_used + (!type struct 914 + (!type already_seen 913) + gc_pointed_to "test_other" + (!srcfileloc "ggc-tests.cc" 128) + (!fields 2 + (!pair "dummy" + (!type array 915 nil gc_used "256" + (!type already_seen 8) + ) + (!srcfileloc "ggc-tests.cc" 96) + nil ) + (!pair "m_ptr" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 97) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "ggc-tests.cc" 128) + (!options + (!option tag string "WHICH_FIELD_USE_TEST_OTHER") + ) + ) + ) + nil 1023 nil ) + + (!type already_seen 224) + + (!type already_seen 267) + + (!type already_seen 222) + + (!type already_seen 315) + + (!type already_seen 314) + + (!type already_seen 313) + + (!type already_seen 312) + + (!type 
already_seen 311) + + (!type already_seen 310) + + (!type already_seen 309) + + (!type already_seen 308) + + (!type already_seen 307) + + (!type already_seen 306) + + (!type already_seen 305) + + (!type already_seen 304) + + (!type already_seen 303) + + (!type already_seen 302) + + (!type already_seen 301) + + (!type already_seen 266) + + (!type already_seen 265) + + (!type already_seen 264) + + (!type already_seen 263) + + (!type already_seen 262) + + (!type already_seen 261) + + (!type already_seen 260) + + (!type already_seen 259) + + (!type already_seen 258) + + (!type already_seen 257) + + (!type already_seen 256) + + (!type already_seen 255) + + (!type already_seen 254) + + (!type already_seen 253) + + (!type already_seen 252) + + (!type already_seen 251) + + (!type already_seen 250) + + (!type already_seen 249) + + (!type already_seen 248) + + (!type already_seen 247) + + (!type already_seen 246) + + (!type already_seen 245) + + (!type already_seen 244) + + (!type already_seen 243) + + (!type already_seen 242) + + (!type already_seen 241) + + (!type already_seen 240) + + (!type already_seen 237) + + (!type already_seen 236) + + (!type already_seen 235) + + (!type already_seen 234) + + (!type already_seen 233) + + (!type already_seen 229) + + (!type already_seen 226) + + (!type already_seen 225) + + (!type already_seen 207) + + (!type already_seen 206) + + (!type already_seen 205) + + (!type already_seen 204) + + (!type already_seen 203) + + (!type already_seen 202) + + (!type already_seen 201) + + (!type already_seen 200) + + (!type already_seen 199) + + (!type already_seen 198) + + (!type already_seen 197) + + (!type already_seen 196) + + (!type already_seen 195) + + (!type already_seen 194) + + (!type already_seen 193) + + (!type already_seen 192) + + (!type already_seen 191) + + (!type already_seen 190) + + (!type already_seen 189) + + (!type already_seen 188) + + (!type already_seen 187) + + (!type already_seen 186) + + (!type already_seen 185) + + 
(!type already_seen 184) + + (!type already_seen 183) + + (!type already_seen 182) + + (!type already_seen 181) + + (!type already_seen 180) + + (!type already_seen 179) + + (!type already_seen 178) + + (!type already_seen 177) + + (!type already_seen 176) + + (!type already_seen 175) + + (!type already_seen 174) + + (!type already_seen 173) + + (!type already_seen 172) + + (!type already_seen 171) + + (!type already_seen 170) + + (!type already_seen 169) + + (!type already_seen 168) + + (!type already_seen 167) + + (!type already_seen 166) + + (!type already_seen 165) + + (!type already_seen 164) + + (!type already_seen 163) + + (!type already_seen 162) + + (!type already_seen 161) + + (!type already_seen 160) + + (!type already_seen 159) + + (!type already_seen 158) + + (!type already_seen 157) + + (!type already_seen 156) + + (!type already_seen 155) + + (!type already_seen 154) + + (!type already_seen 153) + + (!type already_seen 152) + + (!type already_seen 151) + + (!type already_seen 150) + + (!type already_seen 149) + + (!type already_seen 148) + + (!type already_seen 147) + + (!type already_seen 146) + + (!type already_seen 145) + + (!type already_seen 144) + + (!type already_seen 143) + + (!type already_seen 142) + + (!type already_seen 141) + + (!type already_seen 140) + + (!type already_seen 139) + + (!type already_seen 138) + + (!type already_seen 137) + + (!type already_seen 136) + + (!type already_seen 135) + + (!type already_seen 134) + + (!type already_seen 133) + + (!type already_seen 132) + + (!type already_seen 131) + + (!type already_seen 130) + + (!type already_seen 129) + + (!type already_seen 128) + + (!type already_seen 127) + + (!type already_seen 126) + + (!type already_seen 125) + + (!type already_seen 124) + + (!type already_seen 123) + + (!type already_seen 122) + + (!type already_seen 121) + + (!type already_seen 120) + + (!type already_seen 119) + + (!type already_seen 118) + + (!type already_seen 117) + + (!type already_seen 116) + 
+ (!type already_seen 115) + + (!type already_seen 114) + + (!type already_seen 113) + + (!type already_seen 112) + + (!type already_seen 111) + + (!type already_seen 110) + + (!type already_seen 109) + + (!type already_seen 108) + + (!type already_seen 107) + + (!type already_seen 106) + + (!type already_seen 105) + + (!type already_seen 104) + + (!type struct 916 nil gc_unused "full_rtx_costs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 917 nil gc_unused "subreg_shape"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 918 nil gc_unused "address_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 919 nil gc_unused "std::pair" + (!srcfileloc "rtl.h" 2252) + (!fields 2 + (!pair "machine_mode" + (!type already_seen 2) + (!srcfileloc "rtl.h" 2252) + nil ) + (!pair "rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 2252) + nil ) + ) + ) + + (!type user_struct 920 nil gc_unused "poly_int>>" + (!srcfileloc "rtl.h" 2340) + (!fields 3 + (!pair "false" + (!type already_seen 872) + (!srcfileloc "rtl.h" 2340) + nil ) + (!pair "generic_wide_int" + (!srcfileloc "tree.h" 5653) + (!fields 3 + (!pair "tree_cache_traits" + (!type already_seen 934) + (!srcfileloc "tree.h" 5653) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5653) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5653) + nil ) + ) + ) + + (!type struct 936 nil gc_used "decl_tree_cache_traits" + (!srcfileloc "tree.h" 5659) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 937 + (!type pointer 938 nil gc_used + (!type already_seen 937) + ) + gc_pointed_to "hash_map" + (!srcfileloc "tree.h" 5659) + (!fields 3 + (!pair "decl_tree_cache_traits" + (!type already_seen 936) + (!srcfileloc "tree.h" 5659) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5659) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5659) + nil ) + ) + ) + + (!type struct 939 nil gc_used "type_tree_cache_traits" + 
(!srcfileloc "tree.h" 5665) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 940 + (!type pointer 941 nil gc_used + (!type already_seen 940) + ) + gc_pointed_to "hash_map" + (!srcfileloc "tree.h" 5665) + (!fields 3 + (!pair "type_tree_cache_traits" + (!type already_seen 939) + (!srcfileloc "tree.h" 5665) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5665) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5665) + nil ) + ) + ) + + (!type struct 942 nil gc_used "decl_tree_traits" + (!srcfileloc "tree.h" 5670) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 943 + (!type pointer 944 nil gc_used + (!type already_seen 943) + ) + gc_pointed_to "hash_map" + (!srcfileloc "tree.h" 5670) + (!fields 3 + (!pair "decl_tree_traits" + (!type already_seen 942) + (!srcfileloc "tree.h" 5670) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5670) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "tree.h" 5670) + nil ) + ) + ) + + (!type struct 945 nil gc_unused "unextended_tree" + (!srcfileloc "tree.h" 6018) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 946 nil gc_unused "extended_tree"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 947 nil gc_unused "extended_tree" + (!srcfileloc "tree.h" 5998) + (!fields 1 + (!pair "WIDE_INT_MAX_PRECISION" + (!type already_seen 865) + (!srcfileloc "tree.h" 5998) + nil ) + ) + ) + + (!type user_struct 948 nil gc_unused "extended_tree" + (!srcfileloc "tree.h" 5999) + (!fields 1 + (!pair "ADDR_MAX_PRECISION" + (!type already_seen 865) + (!srcfileloc "tree.h" 5999) + nil ) + ) + ) + + (!type user_struct 949 nil gc_unused "generic_wide_int" + (!srcfileloc "tree.h" 6001) + (!fields 1 + (!pair "widest_extended_tree" + (!type already_seen 947) + (!srcfileloc "tree.h" 6001) + nil ) + ) + ) + + (!type user_struct 950 nil gc_unused "generic_wide_int" + (!srcfileloc "tree.h" 6002) + (!fields 1 + (!pair "offset_extended_tree" + 
(!type already_seen 948) + (!srcfileloc "tree.h" 6002) + nil ) + ) + ) + + (!type user_struct 951 nil gc_unused "generic_wide_int>" + (!srcfileloc "tree.h" 6003) + (!fields 2 + (!pair "false" + (!type already_seen 872) + (!srcfileloc "tree.h" 6003) + nil ) + (!pair "wide_int_ref_storage>" + (!srcfileloc "tree.h" 6012) + (!fields 2 + (!pair "generic_wide_int>" + (!srcfileloc "tree.h" 6015) + (!fields 2 + (!pair "generic_wide_int>" + (!srcfileloc "tree.h" 6018) + (!fields 2 + (!pair "generic_wide_int>" + (!srcfileloc "tree.h" 6028) + (!fields 1 + (!pair "extended_tree" + (!srcfileloc "tree.h" 6035) + (!fields 1 + (!pair "unextended_tree" + (!type already_seen 945) + (!srcfileloc "tree.h" 6035) + nil ) + ) + ) + + (!type user_struct 962 nil gc_unused "generic_wide_int>" + (!srcfileloc "tree.h" 6042) + (!fields 1 + (!pair "wi::extended_tree" + (!srcfileloc "tree-core.h" 2275) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "tree-core.h" 2275) + nil ) + (!pair "alias_pair" + (!type already_seen 969) + (!srcfileloc "tree-core.h" 2275) + nil ) + ) + ) + + (!type struct 985 nil gc_pointed_to "libfunc_entry" + (!srcfileloc "libfuncs.h" 44) + (!fields 4 + (!pair "op" + (!type already_seen 2) + (!srcfileloc "libfuncs.h" 42) + nil ) + (!pair "mode1" + (!type already_seen 2) + (!srcfileloc "libfuncs.h" 42) + nil ) + (!pair "mode2" + (!type already_seen 2) + (!srcfileloc "libfuncs.h" 42) + nil ) + (!pair "libfunc" + (!type already_seen 99) + (!srcfileloc "libfuncs.h" 43) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type already_seen 607) + + (!type already_seen 606) + + (!type already_seen 603) + + (!type struct 986 nil gc_unused "ht"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 987 nil gc_unused "_obstack_chunk"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 988 nil gc_unused "obstack"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 989 nil gc_unused "real_format"nil + (!fields 0 ) + nil 0 nil nil 
) + + (!type struct 990 nil gc_unused "format_helper"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 991 + (!type pointer 992 nil gc_used + (!type already_seen 991) + ) + gc_pointed_to "sequence_stack" + (!srcfileloc "function.h" 34) + (!fields 3 + (!pair "first" + (!type already_seen 296) + (!srcfileloc "function.h" 31) + nil ) + (!pair "last" + (!type already_seen 296) + (!srcfileloc "function.h" 32) + nil ) + (!pair "next" + (!type already_seen 992) + (!srcfileloc "function.h" 33) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 993 nil gc_used "emit_status" + (!srcfileloc "function.h" 76) + (!fields 7 + (!pair "x_reg_rtx_no" + (!type already_seen 2) + (!srcfileloc "function.h" 41) + nil ) + (!pair "x_first_label_num" + (!type already_seen 2) + (!srcfileloc "function.h" 44) + nil ) + (!pair "seq" + (!type already_seen 991) + (!srcfileloc "function.h" 56) + nil ) + (!pair "x_cur_insn_uid" + (!type already_seen 2) + (!srcfileloc "function.h" 60) + nil ) + (!pair "x_cur_debug_insn_uid" + (!type already_seen 2) + (!srcfileloc "function.h" 64) + nil ) + (!pair "regno_pointer_align_length" + (!type already_seen 2) + (!srcfileloc "function.h" 70) + nil ) + (!pair "regno_pointer_align" + (!type already_seen 11) + (!srcfileloc "function.h" 75) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + + (!type user_struct 994 + (!type pointer 995 nil gc_used + (!type already_seen 994) + ) + gc_pointed_to "vec" + (!srcfileloc "function.h" 131) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "function.h" 131) + nil ) + (!pair "rtx_insn" + (!type already_seen 296) + (!srcfileloc "function.h" 131) + nil ) + ) + ) + + (!type struct 996 nil gc_used "expr_status" + (!srcfileloc "function.h" 132) + (!fields 6 + (!pair "x_pending_stack_adjust" + (!type already_seen 2) + (!srcfileloc "function.h" 97) + nil ) + (!pair "x_inhibit_defer_pop" + (!type already_seen 2) + (!srcfileloc "function.h" 114) + nil ) + (!pair 
"x_stack_pointer_delta" + (!type already_seen 2) + (!srcfileloc "function.h" 120) + nil ) + (!pair "x_saveregs_value" + (!type already_seen 99) + (!srcfileloc "function.h" 125) + nil ) + (!pair "x_apply_args_value" + (!type already_seen 99) + (!srcfileloc "function.h" 128) + nil ) + (!pair "x_forced_labels" + (!type already_seen 995) + (!srcfileloc "function.h" 131) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 997 + (!type pointer 998 nil gc_used + (!type already_seen 997) + ) + gc_pointed_to "call_site_record_d" + (!srcfileloc "except.cc" 168) + (!fields 2 + (!pair "landing_pad" + (!type already_seen 99) + (!srcfileloc "except.cc" 166) + nil ) + (!pair "action" + (!type already_seen 2) + (!srcfileloc "except.cc" 167) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 376) + + (!type user_struct 999 + (!type pointer 1000 nil gc_used + (!type already_seen 999) + ) + gc_pointed_to "vec" + (!srcfileloc "function.h" 147) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "function.h" 147) + nil ) + (!pair "call_site_record" + (!type already_seen 998) + (!srcfileloc "function.h" 147) + nil ) + ) + ) + + (!type struct 1001 nil gc_used "rtl_eh" + (!srcfileloc "function.h" 148) + (!fields 7 + (!pair "ehr_stackadj" + (!type already_seen 99) + (!srcfileloc "function.h" 138) + nil ) + (!pair "ehr_handler" + (!type already_seen 99) + (!srcfileloc "function.h" 139) + nil ) + (!pair "ehr_label" + (!type already_seen 365) + (!srcfileloc "function.h" 140) + nil ) + (!pair "sjlj_fc" + (!type already_seen 99) + (!srcfileloc "function.h" 142) + nil ) + (!pair "sjlj_exit_after" + (!type already_seen 296) + (!srcfileloc "function.h" 143) + nil ) + (!pair "action_record_data" + (!type already_seen 375) + (!srcfileloc "function.h" 145) + nil ) + (!pair "call_site_record_v" + (!type array 1002 nil gc_used "2" + (!type already_seen 1000) + ) + (!srcfileloc "function.h" 147) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 384) + + (!type already_seen 
471) + + (!type already_seen 516) + + (!type struct 1003 + (!type pointer 1004 nil gc_used + (!type already_seen 1003) + ) + gc_pointed_to "rtx_constant_pool" + (!srcfileloc "varasm.cc" 3769) + (!fields 4 + (!pair "first" + (!type already_seen 223) + (!srcfileloc "varasm.cc" 3757) + nil ) + (!pair "last" + (!type already_seen 223) + (!srcfileloc "varasm.cc" 3758) + nil ) + (!pair "const_rtx_htab" + (!type pointer 1005 nil gc_used + (!type user_struct 1006 + (!type already_seen 1005) + gc_pointed_to "hash_table" + (!srcfileloc "varasm.cc" 3764) + (!fields 1 + (!pair "const_rtx_desc_hasher" + (!type struct 1007 nil gc_used "const_rtx_desc_hasher" + (!srcfileloc "varasm.cc" 3764) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "varasm.cc" 3764) + nil ) + ) + ) + ) + (!srcfileloc "varasm.cc" 3764) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 3768) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1008 nil gc_used "varasm_status" + (!srcfileloc "function.h" 169) + (!fields 2 + (!pair "pool" + (!type already_seen 1004) + (!srcfileloc "function.h" 164) + nil ) + (!pair "deferred_constants" + (!type already_seen 2) + (!srcfileloc "function.h" 168) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1009 nil gc_used "function_subsections" + (!srcfileloc "function.h" 182) + (!fields 4 + (!pair "hot_section_label" + (!type already_seen 11) + (!srcfileloc "function.h" 178) + nil ) + (!pair "cold_section_label" + (!type already_seen 11) + (!srcfileloc "function.h" 179) + nil ) + (!pair "hot_section_end_label" + (!type already_seen 11) + (!srcfileloc "function.h" 180) + nil ) + (!pair "cold_section_end_label" + (!type already_seen 11) + (!srcfileloc "function.h" 181) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1010 + (!type pointer 1011 nil gc_used + (!type already_seen 1010) + ) + gc_pointed_to "frame_space" + (!srcfileloc "function.h" 194) + (!fields 3 + (!pair "next" + (!type already_seen 1011) + (!srcfileloc "function.h" 190) + nil 
) + (!pair "start" + (!type already_seen 2) + (!srcfileloc "function.h" 192) + nil ) + (!pair "length" + (!type already_seen 2) + (!srcfileloc "function.h" 193) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 416) + + (!type already_seen 419) + + (!type already_seen 415) + + (!type already_seen 418) + + (!type already_seen 413) + + (!type already_seen 354) + + (!type already_seen 378) + + (!type already_seen 408) + + (!type already_seen 430) + + (!type already_seen 469) + + (!type struct 1012 nil gc_pointed_to "types_used_by_vars_entry" + (!srcfileloc "function.h" 478) + (!fields 2 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "function.h" 476) + nil ) + (!pair "var_decl" + (!type already_seen 23) + (!srcfileloc "function.h" 477) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1013 nil gc_used "used_type_hasher" + (!srcfileloc "function.h" 491) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1014 + (!type pointer 1015 nil gc_used + (!type already_seen 1014) + ) + gc_pointed_to "hash_table" + (!srcfileloc "function.h" 491) + (!fields 1 + (!pair "used_type_hasher" + (!type already_seen 1013) + (!srcfileloc "function.h" 491) + nil ) + ) + ) + + (!type struct 1016 nil gc_unused "args_size"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1017 nil gc_unused "locate_and_pad_arg_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1018 nil gc_unused "function_abi"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1019 nil gc_unused "function_abi_aggregator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 597) + + (!type already_seen 215) + + (!type already_seen 216) + + (!type already_seen 217) + + (!type already_seen 219) + + (!type already_seen 276) + + (!type already_seen 280) + + (!type already_seen 286) + + (!type already_seen 411) + + (!type already_seen 275) + + (!type already_seen 284) + + (!type already_seen 278) + + (!type already_seen 288) + + (!type 
already_seen 410) + + (!type struct 1020 nil gc_unused "eni_weights"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1021 nil gc_unused "rtx_iv"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1022 nil gc_unused "loops_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 609) + + (!type already_seen 380) + + (!type already_seen 300) + + (!type struct 1023 nil gc_unused "auto_flag"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1024 nil gc_unused "auto_edge_flag"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1025 nil gc_unused "auto_bb_flag"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1026 nil gc_used "sreal" + (!srcfileloc "profile-count.h" 1209) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1027 nil gc_unused "profile_probability" + (!srcfileloc "profile-count.h" 637) + (!fields 0 ) + ) + + (!type struct 1028 nil gc_unused "cselib_val"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1029 nil gc_unused "elt_loc_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1030 nil gc_unused "cselib_set"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 299) + + (!type already_seen 294) + + (!type already_seen 271) + + (!type already_seen 292) + + (!type already_seen 295) + + (!type already_seen 293) + + (!type struct 1031 nil gc_unused "edge_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1032 + (!type pointer 1033 nil gc_unused + (!type already_seen 1032) + ) + gc_unused "ipa_ref" + (!srcfileloc "cgraph.h" 170) + (!fields 0 ) + nil 1023 nil nil ) + + (!type already_seen 320) + + (!type already_seen 829) + + (!type already_seen 342) + + (!type struct 1034 nil gc_used "section_name_hasher" + (!srcfileloc "cgraph.h" 2491) + (!fields 0 ) + nil 1023 nil nil ) + + (!type undefined 1035 nil gc_unused "explicit" + (!srcfileloc "cgraph.h" 113) + ) + + (!type already_seen 323) + + (!type struct 1036 + (!type pointer 1037 nil gc_unused + (!type already_seen 1036) + ) + gc_used 
"symbol_priority_map" + (!srcfileloc "cgraph.h" 2497) + (!fields 2 + (!pair "init" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2164) + nil ) + (!pair "fini" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2165) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1038 + (!type pointer 1039 nil gc_used + (!type already_seen 1038) + ) + gc_pointed_to "ipa_replace_map" + (!srcfileloc "cgraph.h" 711) + (!fields 3 + (!pair "new_tree" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 704) + nil ) + (!pair "parm_num" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 706) + nil ) + (!pair "force_load_ref" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 710) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 824) + + (!type already_seen 822) + + (!type struct 1040 + (!type pointer 1041 nil gc_used + (!type already_seen 1040) + ) + gc_pointed_to "cgraph_function_version_info" + (!srcfileloc "cgraph.h" 839) + (!fields 4 + (!pair "this_node" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 820) + nil ) + (!pair "prev" + (!type already_seen 1041) + (!srcfileloc "cgraph.h" 824) + nil ) + (!pair "next" + (!type already_seen 1041) + (!srcfileloc "cgraph.h" 828) + nil ) + (!pair "dispatcher_resolver" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 838) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type already_seen 820) + + (!type user_struct 1042 nil gc_unused "auto_vec" + (!srcfileloc "cgraph.h" 1153) + (!fields 1 + (!pair "cgraph_edge" + (!type already_seen 813) + (!srcfileloc "cgraph.h" 1153) + nil ) + ) + ) + + (!type already_seen 819) + + (!type already_seen 825) + + (!type already_seen 827) + + (!type already_seen 826) + + (!type struct 1043 + (!type pointer 1044 nil gc_unused + (!type already_seen 1043) + ) + gc_unused "cgraph_node_set_def" + (!srcfileloc "cgraph.h" 1515) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1045 + (!type pointer 1046 nil gc_unused + (!type already_seen 1045) + ) + gc_unused 
"varpool_node_set_def"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1047 nil gc_unused "cgraph_node_set_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1048 nil gc_unused "varpool_node_set_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 817) + + (!type already_seen 816) + + (!type undefined 1049 nil gc_unused "availability" + (!srcfileloc "cgraph.h" 2007) + ) + + (!type struct 1050 + (!type pointer 1051 nil gc_used + (!type already_seen 1050) + ) + gc_pointed_to "asm_node" + (!srcfileloc "cgraph.h" 2128) + (!fields 3 + (!pair "next" + (!type already_seen 1051) + (!srcfileloc "cgraph.h" 2123) + nil ) + (!pair "asm_str" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 2125) + nil ) + (!pair "order" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2127) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1052 + (!type pointer 1053 nil gc_unused + (!type already_seen 1052) + ) + gc_unused "cgraph_edge_hook_list" + (!srcfileloc "cgraph.h" 2340) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1054 + (!type pointer 1055 nil gc_unused + (!type already_seen 1054) + ) + gc_unused "cgraph_node_hook_list" + (!srcfileloc "cgraph.h" 2347) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1056 + (!type pointer 1057 nil gc_unused + (!type already_seen 1056) + ) + gc_unused "varpool_node_hook_list" + (!srcfileloc "cgraph.h" 2354) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1058 + (!type pointer 1059 nil gc_unused + (!type already_seen 1058) + ) + gc_unused "cgraph_2edge_hook_list" + (!srcfileloc "cgraph.h" 2375) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1060 + (!type pointer 1061 nil gc_unused + (!type already_seen 1060) + ) + gc_unused "cgraph_2node_hook_list" + (!srcfileloc "cgraph.h" 2381) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1062 nil gc_used "asmname_hasher" + (!srcfileloc "cgraph.h" 2494) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1063 + (!type pointer 1064 nil 
gc_used + (!type already_seen 1063) + ) + gc_pointed_to "thunk_info" + (!srcfileloc "symtab-thunks.h" 145) + (!fields 6 + (!pair "fixed_offset" + (!type already_seen 2) + (!srcfileloc "symtab-thunks.h" 87) + nil ) + (!pair "virtual_value" + (!type already_seen 2) + (!srcfileloc "symtab-thunks.h" 91) + nil ) + (!pair "indirect_offset" + (!type already_seen 2) + (!srcfileloc "symtab-thunks.h" 95) + nil ) + (!pair "alias" + (!type already_seen 23) + (!srcfileloc "symtab-thunks.h" 99) + nil ) + (!pair "this_adjusting" + (!type already_seen 2) + (!srcfileloc "symtab-thunks.h" 103) + nil ) + (!pair "virtual_offset_p" + (!type already_seen 2) + (!srcfileloc "symtab-thunks.h" 111) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1065 + (!type pointer 1066 nil gc_used + (!type already_seen 1065) + ) + gc_pointed_to "function_summary" + (!srcfileloc "cgraph.h" 2201) + (!fields 1 + (!pair "thunk_info" + (!type already_seen 1064) + (!srcfileloc "cgraph.h" 2201) + nil ) + ) + ) + + (!type struct 1067 + (!type pointer 1068 nil gc_used + (!type already_seen 1067) + ) + gc_pointed_to "clone_info" + (!srcfileloc "symtab-clones.h" 49) + (!fields 2 + (!pair "tree_map" + (!type pointer 1069 nil gc_used + (!type user_struct 1070 + (!type already_seen 1069) + gc_pointed_to "vec" + (!srcfileloc "symtab-clones.h" 34) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "symtab-clones.h" 34) + nil ) + (!pair "ipa_replace_map" + (!type already_seen 1039) + (!srcfileloc "symtab-clones.h" 34) + nil ) + ) + ) + ) + (!srcfileloc "symtab-clones.h" 34) + nil ) + (!pair "param_adjustments" + (!type pointer 1071 nil gc_used + (!type struct 1072 + (!type already_seen 1071) + gc_pointed_to "ipa_param_adjustments" + (!srcfileloc "ipa-param-manipulation.h" 271) + (!fields 3 + (!pair "m_adj_params" + (!type pointer 1073 nil gc_used + (!type user_struct 1074 + (!type already_seen 1073) + gc_pointed_to "vec" + (!srcfileloc "ipa-param-manipulation.h" 254) + (!fields 2 + (!pair 
"va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-param-manipulation.h" 254) + nil ) + (!pair "ipa_adjusted_param" + (!type struct 1075 nil gc_used "ipa_adjusted_param" + (!srcfileloc "ipa-param-manipulation.h" 254) + (!fields 10 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ipa-param-manipulation.h" 162) + nil ) + (!pair "alias_ptr_type" + (!type already_seen 23) + (!srcfileloc "ipa-param-manipulation.h" 166) + nil ) + (!pair "unit_offset" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 171) + nil ) + (!pair "base_index" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 177) + nil ) + (!pair "prev_clone_index" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 181) + nil ) + (!pair "op" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 184) + nil ) + (!pair "prev_clone_adjustment" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 188) + nil ) + (!pair "param_prefix_index" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 192) + nil ) + (!pair "reverse" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 196) + nil ) + (!pair "user_flag" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 199) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-param-manipulation.h" 254) + nil ) + ) + ) + ) + (!srcfileloc "ipa-param-manipulation.h" 254) + nil ) + (!pair "m_always_copy_start" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 260) + nil ) + (!pair "m_skip_return" + (!type already_seen 2) + (!srcfileloc "ipa-param-manipulation.h" 262) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "symtab-clones.h" 36) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1076 + (!type pointer 1077 nil gc_used + (!type already_seen 1076) + ) + gc_pointed_to "function_summary" + (!srcfileloc "cgraph.h" 2205) + (!fields 1 + (!pair "clone_info" + (!type already_seen 1068) + (!srcfileloc "cgraph.h" 2205) + nil ) + ) + 
) + + (!type struct 1078 + (!type pointer 1079 nil gc_used + (!type already_seen 1078) + ) + gc_pointed_to "symbol_table" + (!srcfileloc "cgraph.h" 2551) + (!fields 33 + (!pair "DEBUG_FUNCTION" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2424) + nil ) + (!pair "cgraph_count" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2452) + nil ) + (!pair "cgraph_max_uid" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2453) + nil ) + (!pair "cgraph_max_summary_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2454) + nil ) + (!pair "edges_count" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2456) + nil ) + (!pair "edges_max_uid" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2457) + nil ) + (!pair "edges_max_summary_id" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2458) + nil ) + (!pair "cgraph_released_summary_ids" + (!type user_struct 1080 nil gc_unused "vec" + (!srcfileloc "cgraph.h" 2461) + (!fields 1 + (!pair "int" + (!type already_seen 373) + (!srcfileloc "cgraph.h" 2461) + nil ) + ) + ) + (!srcfileloc "cgraph.h" 2461) + (!options + (!option skip string "") + ) + ) + (!pair "edge_released_summary_ids" + (!type already_seen 1080) + (!srcfileloc "cgraph.h" 2464) + (!options + (!option skip string "") + ) + ) + (!pair "nodes" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 2469) + nil ) + (!pair "asmnodes" + (!type already_seen 1051) + (!srcfileloc "cgraph.h" 2470) + nil ) + (!pair "asm_last_node" + (!type already_seen 1051) + (!srcfileloc "cgraph.h" 2471) + nil ) + (!pair "order" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2476) + nil ) + (!pair "max_unit" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2479) + nil ) + (!pair "global_info_ready" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2482) + nil ) + (!pair "state" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2484) + nil ) + (!pair "function_flags_ready" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2486) + nil ) + (!pair "cpp_implicit_aliases_done" + 
(!type already_seen 2) + (!srcfileloc "cgraph.h" 2488) + nil ) + (!pair "section_hash" + (!type pointer 1081 nil gc_used + (!type user_struct 1082 + (!type already_seen 1081) + gc_pointed_to "hash_table" + (!srcfileloc "cgraph.h" 2491) + (!fields 1 + (!pair "section_name_hasher" + (!type already_seen 1034) + (!srcfileloc "cgraph.h" 2491) + nil ) + ) + ) + ) + (!srcfileloc "cgraph.h" 2491) + nil ) + (!pair "assembler_name_hash" + (!type pointer 1083 nil gc_used + (!type user_struct 1084 + (!type already_seen 1083) + gc_pointed_to "hash_table" + (!srcfileloc "cgraph.h" 2494) + (!fields 1 + (!pair "asmname_hasher" + (!type already_seen 1062) + (!srcfileloc "cgraph.h" 2494) + nil ) + ) + ) + ) + (!srcfileloc "cgraph.h" 2494) + nil ) + (!pair "init_priority_hash" + (!type pointer 1085 nil gc_used + (!type user_struct 1086 + (!type already_seen 1085) + gc_pointed_to "hash_map" + (!srcfileloc "cgraph.h" 2497) + (!fields 2 + (!pair "symbol_priority_map" + (!type already_seen 1036) + (!srcfileloc "cgraph.h" 2497) + nil ) + (!pair "symtab_node" + (!type already_seen 318) + (!srcfileloc "cgraph.h" 2497) + nil ) + ) + ) + ) + (!srcfileloc "cgraph.h" 2497) + nil ) + (!pair "dump_file" + (!type pointer 1087 nil gc_unused + (!type undefined 1088 + (!type already_seen 1087) + gc_unused "FILE" + (!srcfileloc "cgraph.h" 2499) + ) + ) + (!srcfileloc "cgraph.h" 2499) + (!options + (!option skip string "") + ) + ) + (!pair "ipa_clones_dump_file" + (!type already_seen 1087) + (!srcfileloc "cgraph.h" 2501) + (!options + (!option skip string "") + ) + ) + (!pair "cloned_nodes" + (!type user_struct 1089 nil gc_unused "hash_set" + (!srcfileloc "cgraph.h" 2503) + (!fields 1 + (!pair "cgraph_node" + (!type already_seen 812) + (!srcfileloc "cgraph.h" 2503) + nil ) + ) + ) + (!srcfileloc "cgraph.h" 2503) + (!options + (!option skip string "") + ) + ) + (!pair "m_thunks" + (!type already_seen 1066) + (!srcfileloc "cgraph.h" 2506) + nil ) + (!pair "m_clones" + (!type already_seen 1077) + 
(!srcfileloc "cgraph.h" 2509) + nil ) + (!pair "m_first_edge_removal_hook" + (!type already_seen 1053) + (!srcfileloc "cgraph.h" 2538) + (!options + (!option skip string "") + ) + ) + (!pair "m_first_cgraph_removal_hook" + (!type already_seen 1055) + (!srcfileloc "cgraph.h" 2540) + (!options + (!option skip string "") + ) + ) + (!pair "m_first_edge_duplicated_hook" + (!type already_seen 1059) + (!srcfileloc "cgraph.h" 2542) + (!options + (!option skip string "") + ) + ) + (!pair "m_first_cgraph_duplicated_hook" + (!type already_seen 1061) + (!srcfileloc "cgraph.h" 2544) + (!options + (!option skip string "") + ) + ) + (!pair "m_first_cgraph_insertion_hook" + (!type already_seen 1055) + (!srcfileloc "cgraph.h" 2546) + (!options + (!option skip string "") + ) + ) + (!pair "m_first_varpool_insertion_hook" + (!type already_seen 1057) + (!srcfileloc "cgraph.h" 2548) + (!options + (!option skip string "") + ) + ) + (!pair "m_first_varpool_removal_hook" + (!type already_seen 1057) + (!srcfileloc "cgraph.h" 2550) + (!options + (!option skip string "") + ) + ) + ) + (!options + (!option tag string "SYMTAB") + ) + 1023 nil nil ) + + (!type already_seen 1080) + + (!type already_seen 1082) + + (!type already_seen 1084) + + (!type already_seen 1086) + + (!type already_seen 1088) + + (!type already_seen 1089) + + (!type struct 1090 nil gc_pointed_to "constant_descriptor_tree" + (!srcfileloc "cgraph.h" 3092) + (!fields 3 + (!pair "rtl" + (!type already_seen 99) + (!srcfileloc "cgraph.h" 3083) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "cgraph.h" 3086) + nil ) + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 3091) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1091 nil gc_used "tree_descriptor_hasher" + (!srcfileloc "varasm.cc" 3093) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1092 nil gc_unused "symbol_table_test"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1093 nil gc_used 
"unprocessed_thunk" + (!srcfileloc "symtab-thunks.cc" 62) + (!fields 2 + (!pair "node" + (!type already_seen 812) + (!srcfileloc "symtab-thunks.cc" 57) + nil ) + (!pair "info" + (!type already_seen 1064) + (!srcfileloc "symtab-thunks.cc" 58) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1094 + (!type pointer 1095 nil gc_used + (!type already_seen 1094) + ) + gc_pointed_to "vec" + (!srcfileloc "symtab-thunks.cc" 62) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "symtab-thunks.cc" 62) + nil ) + (!pair "unprocessed_thunk" + (!type already_seen 1093) + (!srcfileloc "symtab-thunks.cc" 62) + nil ) + ) + ) + + (!type user_struct 1096 nil gc_unused "thunk_infos_t" + (!srcfileloc "symtab-thunks.cc" 78) + (!fields 0 ) + ) + + (!type already_seen 1070) + + (!type already_seen 1072) + + (!type struct 1097 nil gc_unused "reload"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 599) + + (!type struct 1098 nil gc_unused "reg_equivs_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1099 nil gc_unused "insn_chain"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1100 nil gc_unused "saved_hard_reg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1101 nil gc_used "alias_set_hash" + (!srcfileloc "alias.cc" 148) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1102 + (!type pointer 1103 nil gc_used + (!type already_seen 1102) + ) + gc_pointed_to "hash_map" + (!srcfileloc "alias.cc" 148) + (!fields 2 + (!pair "int" + (!type already_seen 373) + (!srcfileloc "alias.cc" 148) + nil ) + (!pair "alias_set_hash" + (!type already_seen 1101) + (!srcfileloc "alias.cc" 148) + nil ) + ) + ) + + (!type struct 1104 + (!type pointer 1105 nil gc_used + (!type already_seen 1104) + ) + gc_pointed_to "alias_set_entry" + (!srcfileloc "alias.cc" 149) + (!fields 5 + (!pair "alias_set" + (!type already_seen 2) + (!srcfileloc "alias.cc" 128) + nil ) + (!pair "has_zero_child" + (!type already_seen 2) + (!srcfileloc "alias.cc" 132) + nil ) 
+ (!pair "is_pointer" + (!type already_seen 2) + (!srcfileloc "alias.cc" 137) + nil ) + (!pair "has_pointer" + (!type already_seen 2) + (!srcfileloc "alias.cc" 139) + nil ) + (!pair "children" + (!type already_seen 1103) + (!srcfileloc "alias.cc" 148) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1106 + (!type pointer 1107 nil gc_used + (!type already_seen 1106) + ) + gc_pointed_to "vec" + (!srcfileloc "alias.cc" 280) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "alias.cc" 280) + nil ) + (!pair "alias_set_entry" + (!type already_seen 1105) + (!srcfileloc "alias.cc" 280) + nil ) + ) + ) + + (!type struct 1108 nil gc_unused "elt_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1109 nil gc_unused "expand_value_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1110 nil gc_unused "cselib_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1111 nil gc_unused "cselib_record_autoinc_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1112 nil gc_used "function_version_hasher" + (!srcfileloc "cgraph.cc" 122) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1113 + (!type pointer 1114 nil gc_used + (!type already_seen 1113) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cgraph.cc" 122) + (!fields 1 + (!pair "function_version_hasher" + (!type already_seen 1112) + (!srcfileloc "cgraph.cc" 122) + nil ) + ) + ) + + (!type struct 1115 nil gc_unused "set_pure_flag_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 325) + + (!type struct 1116 nil gc_used "ipa_bit_ggc_hash_traits" + (!srcfileloc "ipa-prop.cc" 109) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1117 + (!type pointer 1118 nil gc_used + (!type already_seen 1117) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ipa-prop.cc" 109) + (!fields 1 + (!pair "ipa_bit_ggc_hash_traits" + (!type already_seen 1116) + (!srcfileloc "ipa-prop.cc" 109) + nil ) + ) + ) + + (!type struct 1119 nil gc_used 
"ipa_vr_ggc_hash_traits" + (!srcfileloc "ipa-prop.cc" 156) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1120 + (!type pointer 1121 nil gc_used + (!type already_seen 1120) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ipa-prop.cc" 156) + (!fields 1 + (!pair "ipa_vr_ggc_hash_traits" + (!type already_seen 1119) + (!srcfileloc "ipa-prop.cc" 156) + nil ) + ) + ) + + (!type struct 1122 + (!type pointer 1123 nil gc_unused + (!type already_seen 1122) + ) + gc_unused "ipa_cst_ref_desc"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1124 nil gc_used "ipa_jump_func" + (!srcfileloc "ipa-prop.h" 961) + (!fields 5 + (!pair "agg" + (!type struct 1125 nil gc_used "ipa_agg_jump_function" + (!srcfileloc "ipa-prop.h" 185) + (!fields 2 + (!pair "items" + (!type pointer 1126 nil gc_used + (!type user_struct 1127 + (!type already_seen 1126) + gc_pointed_to "vec" + (!srcfileloc "ipa-prop.h" 182) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-prop.h" 182) + nil ) + (!pair "ipa_agg_jf_item" + (!type struct 1128 nil gc_used "ipa_agg_jf_item" + (!srcfileloc "ipa-prop.h" 182) + (!fields 4 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 157) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 160) + nil ) + (!pair "jftype" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 163) + nil ) + (!pair "value" + (!type union 1129 nil gc_used "jump_func_agg_value" + (!srcfileloc "ipa-prop.h" 174) + (!fields 3 + (!pair "constant" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 171) + (!options + (!option tag string "IPA_JF_CONST") + ) + ) + (!pair "pass_through" + (!type struct 1130 nil gc_used "ipa_pass_through_data" + (!srcfileloc "ipa-prop.h" 113) + (!fields 4 + (!pair "operand" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 93) + nil ) + (!pair "formal_id" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 95) + nil ) + (!pair "operation" + (!type already_seen 2) + (!srcfileloc 
"ipa-prop.h" 105) + nil ) + (!pair "agg_preserved" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 112) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 172) + (!options + (!option tag string "IPA_JF_PASS_THROUGH") + ) + ) + (!pair "load_agg" + (!type struct 1131 nil gc_used "ipa_load_agg_data" + (!srcfileloc "ipa-prop.h" 133) + (!fields 4 + (!pair "pass_through" + (!type already_seen 1130) + (!srcfileloc "ipa-prop.h" 124) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 126) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 128) + nil ) + (!pair "by_ref" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 132) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 173) + (!options + (!option tag string "IPA_JF_LOAD_AGG") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "ipa-prop.h" 174) + (!options + (!option desc string "%1.jftype") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 182) + nil ) + ) + ) + ) + (!srcfileloc "ipa-prop.h" 182) + nil ) + (!pair "by_ref" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 184) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 318) + nil ) + (!pair "bits" + (!type pointer 1132 nil gc_used + (!type struct 1133 + (!type already_seen 1132) + gc_pointed_to "ipa_bits" + (!srcfileloc "ipa-prop.h" 296) + (!fields 2 + (!pair "value" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 291) + nil ) + (!pair "mask" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 295) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "ipa-prop.h" 323) + nil ) + (!pair "m_vr" + (!type pointer 1134 nil gc_used + (!type user_struct 1135 + (!type already_seen 1134) + gc_pointed_to "int_range<1>" + (!srcfileloc "value-range.h" 182) + (!fields 1 + (!pair "1" + (!type undefined 1136 nil gc_unused "1" + (!srcfileloc "value-range.h" 182) + ) + (!srcfileloc "value-range.h" 182) + nil ) + ) + ) + ) + (!srcfileloc "ipa-prop.h" 328) + nil ) + (!pair "type" 
+ (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 330) + nil ) + (!pair "value" + (!type union 1137 nil gc_used "jump_func_value" + (!srcfileloc "ipa-prop.h" 339) + (!fields 3 + (!pair "constant" + (!type struct 1138 nil gc_used "ipa_constant_data" + (!srcfileloc "ipa-prop.h" 85) + (!fields 2 + (!pair "value" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 82) + nil ) + (!pair "rdesc" + (!type already_seen 1123) + (!srcfileloc "ipa-prop.h" 84) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 336) + (!options + (!option tag string "IPA_JF_CONST") + ) + ) + (!pair "pass_through" + (!type already_seen 1130) + (!srcfileloc "ipa-prop.h" 337) + (!options + (!option tag string "IPA_JF_PASS_THROUGH") + ) + ) + (!pair "ancestor" + (!type struct 1139 nil gc_used "ipa_ancestor_jf_data" + (!srcfileloc "ipa-prop.h" 148) + (!fields 4 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 141) + nil ) + (!pair "formal_id" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 143) + nil ) + (!pair "agg_preserved" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 145) + nil ) + (!pair "keep_null" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 147) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 338) + (!options + (!option tag string "IPA_JF_ANCESTOR") + ) + ) + ) + nil 1023 nil ) + (!srcfileloc "ipa-prop.h" 339) + (!options + (!option desc string "%1.type") + ) + ) + ) + nil 1023 nil nil ) + + (!type already_seen 1128) + + (!type struct 1140 nil gc_unused "prop_type_change_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1141 nil gc_unused "ipa_bb_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1142 nil gc_unused "ipa_param_aa_status"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1143 nil gc_pointed_to "ipa_node_params" + (!srcfileloc "ipa-prop.h" 632) + (!fields 14 + (!pair "descriptors" + (!type pointer 1144 nil gc_used + (!type user_struct 1145 + (!type 
already_seen 1144) + gc_pointed_to "vec" + (!srcfileloc "ipa-prop.h" 598) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-prop.h" 598) + nil ) + (!pair "ipa_param_descriptor" + (!type struct 1146 nil gc_used "ipa_param_descriptor" + (!srcfileloc "ipa-prop.h" 598) + (!fields 8 + (!pair "decl_or_type" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 563) + nil ) + (!pair "controlled_uses" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 568) + nil ) + (!pair "move_cost" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 569) + nil ) + (!pair "used" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 571) + nil ) + (!pair "used_by_ipa_predicates" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 572) + nil ) + (!pair "used_by_indirect_call" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 573) + nil ) + (!pair "used_by_polymorphic_call" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 574) + nil ) + (!pair "load_dereferenced" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 580) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-prop.h" 598) + nil ) + ) + ) + ) + (!srcfileloc "ipa-prop.h" 598) + nil ) + (!pair "lattices" + (!type pointer 1147 nil gc_unused + (!type struct 1148 + (!type already_seen 1147) + gc_unused "ipcp_param_lattices"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "ipa-prop.h" 601) + (!options + (!option skip string "") + ) + ) + (!pair "ipcp_orig_node" + (!type already_seen 812) + (!srcfileloc "ipa-prop.h" 604) + (!options + (!option skip string "") + ) + ) + (!pair "known_csts" + (!type user_struct 1149 nil gc_unused "vec" + (!srcfileloc "ipa-prop.h" 607) + (!fields 1 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 607) + nil ) + ) + ) + (!srcfileloc "ipa-prop.h" 607) + (!options + (!option skip string "") + ) + ) + (!pair "known_contexts" + (!type user_struct 1150 nil gc_unused "vec" + (!srcfileloc "ipa-prop.h" 610) + (!fields 1 + (!pair 
"ipa_polymorphic_call_context" + (!type already_seen 817) + (!srcfileloc "ipa-prop.h" 610) + nil ) + ) + ) + (!srcfileloc "ipa-prop.h" 610) + (!options + (!option skip string "") + ) + ) + (!pair "analysis_done" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 613) + nil ) + (!pair "node_enqueued" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 615) + nil ) + (!pair "do_clone_for_all_contexts" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 618) + nil ) + (!pair "is_all_contexts_clone" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 620) + nil ) + (!pair "node_dead" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 623) + nil ) + (!pair "node_within_scc" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 625) + nil ) + (!pair "node_is_self_scc" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 627) + nil ) + (!pair "node_calling_single_call" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 629) + nil ) + (!pair "versionable" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 631) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1151 nil gc_unused "ipa_known_agg_contents_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1131) + + (!type struct 1152 nil gc_unused "analysis_dom_walker"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1153 nil gc_unused "ipa_func_body_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1154 nil gc_unused "ipa_agg_value"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1155 nil gc_pointed_to "ipa_edge_args" + (!srcfileloc "ipa-prop.h" 963) + (!fields 2 + (!pair "jump_functions" + (!type pointer 1156 nil gc_used + (!type user_struct 1157 + (!type already_seen 1156) + gc_pointed_to "vec" + (!srcfileloc "ipa-prop.h" 961) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-prop.h" 961) + nil ) + (!pair "ipa_jump_func" + (!type already_seen 1124) + (!srcfileloc "ipa-prop.h" 961) + nil ) + ) + ) + ) + 
(!srcfileloc "ipa-prop.h" 961) + nil ) + (!pair "polymorphic_call_contexts" + (!type pointer 1158 nil gc_used + (!type user_struct 1159 + (!type already_seen 1158) + gc_pointed_to "vec" + (!srcfileloc "ipa-prop.h" 962) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-prop.h" 962) + nil ) + (!pair "ipa_polymorphic_call_context" + (!type already_seen 817) + (!srcfileloc "ipa-prop.h" 962) + nil ) + ) + ) + ) + (!srcfileloc "ipa-prop.h" 962) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1160 + (!type pointer 1161 nil gc_used + (!type already_seen 1160) + ) + gc_pointed_to "ipa_agg_replacement_value" + (!srcfileloc "ipa-prop.h" 906) + (!fields 5 + (!pair "next" + (!type already_seen 1161) + (!srcfileloc "ipa-prop.h" 890) + nil ) + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 892) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "ipa-prop.h" 894) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 896) + nil ) + (!pair "by_ref" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 898) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1162 nil gc_unused "bitpack_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1163 nil gc_unused "data_in"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1164 nil gc_unused "output_block"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1165 nil gc_unused "ipcp_modif_dom_walker"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1166 nil gc_unused "ipcp_value_source"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1167 nil gc_unused "ipcp_value_base"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1168 nil gc_unused "ipcp_value"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1169 nil gc_unused "ipcp_lattice"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1170 nil gc_unused "ipcp_agg_lattice"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1171 nil 
gc_unused "ipcp_bits_lattice"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1172 nil gc_unused "ipcp_vr_lattice"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1148) + + (!type struct 1173 nil gc_unused "caller_statistics"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1174 nil gc_unused "value_topo_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1175 nil gc_unused "ipa_topo_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1176 nil gc_unused "ipa_agg_value_set"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1125) + + (!type struct 1177 + (!type pointer 1178 nil gc_used + (!type already_seen 1177) + ) + gc_pointed_to "ipa_fn_summary" + (!srcfileloc "ipa-fnsummary.h" 219) + (!fields 15 + (!pair "min_size" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 159) + nil ) + (!pair "inlinable" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 162) + nil ) + (!pair "single_caller" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 165) + nil ) + (!pair "fp_expressions" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 167) + nil ) + (!pair "target_info" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 171) + nil ) + (!pair "estimated_stack_size" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 178) + nil ) + (!pair "time" + (!type already_seen 1026) + (!srcfileloc "ipa-fnsummary.h" 180) + (!options + (!option skip string "") + ) + ) + (!pair "conds" + (!type pointer 1179 nil gc_used + (!type user_struct 1180 + (!type already_seen 1179) + gc_pointed_to "vec" + (!srcfileloc "ipa-predicate.h" 94) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-predicate.h" 94) + nil ) + (!pair "condition" + (!type struct 1181 nil gc_used "condition" + (!srcfileloc "ipa-predicate.h" 94) + (!fields 8 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 52) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc 
"ipa-predicate.h" 54) + nil ) + (!pair "val" + (!type already_seen 23) + (!srcfileloc "ipa-predicate.h" 55) + nil ) + (!pair "operand_num" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 56) + nil ) + (!pair "code" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 57) + nil ) + (!pair "agg_contents" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 60) + nil ) + (!pair "by_ref" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 63) + nil ) + (!pair "param_ops" + (!type pointer 1182 nil gc_used + (!type user_struct 1183 + (!type already_seen 1182) + gc_pointed_to "vec" + (!srcfileloc "ipa-predicate.h" 46) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-predicate.h" 46) + nil ) + (!pair "expr_eval_op" + (!type struct 1184 nil gc_used "expr_eval_op" + (!srcfileloc "ipa-predicate.h" 46) + (!fields 4 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ipa-predicate.h" 37) + nil ) + (!pair "val" + (!type array 1185 nil gc_used "2" + (!type already_seen 23) + ) + (!srcfileloc "ipa-predicate.h" 39) + nil ) + (!pair "index" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 41) + nil ) + (!pair "code" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 43) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-predicate.h" 46) + nil ) + ) + ) + ) + (!srcfileloc "ipa-predicate.h" 66) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-predicate.h" 94) + nil ) + ) + ) + ) + (!srcfileloc "ipa-fnsummary.h" 184) + nil ) + (!pair "size_time_table" + (!type user_struct 1186 nil gc_unused "auto_vec" + (!srcfileloc "ipa-fnsummary.h" 189) + (!fields 1 + (!pair "size_time_entry" + (!type struct 1187 nil gc_unused "size_time_entry" + (!srcfileloc "ipa-fnsummary.h" 189) + (!fields 0 ) + nil 1023 nil nil ) + (!srcfileloc "ipa-fnsummary.h" 189) + nil ) + ) + ) + (!srcfileloc "ipa-fnsummary.h" 189) + (!options + (!option skip string "") + ) + ) + (!pair "call_size_time_table" + (!type user_struct 1188 nil 
gc_unused "vec" + (!srcfileloc "ipa-fnsummary.h" 193) + (!fields 3 + (!pair "vl_ptr" + (!type already_seen 826) + (!srcfileloc "ipa-fnsummary.h" 193) + nil ) + (!pair "va_heap" + (!type already_seen 827) + (!srcfileloc "ipa-fnsummary.h" 193) + nil ) + (!pair "size_time_entry" + (!type already_seen 1187) + (!srcfileloc "ipa-fnsummary.h" 193) + nil ) + ) + ) + (!srcfileloc "ipa-fnsummary.h" 193) + (!options + (!option skip string "") + ) + ) + (!pair "loop_iterations" + (!type pointer 1189 nil gc_used + (!type user_struct 1190 + (!type already_seen 1189) + gc_pointed_to "vec" + (!srcfileloc "ipa-fnsummary.h" 196) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-fnsummary.h" 196) + nil ) + (!pair "ipa_freqcounting_predicate" + (!type struct 1191 nil gc_used "ipa_freqcounting_predicate" + (!srcfileloc "ipa-fnsummary.h" 196) + (!fields 2 + (!pair "freq" + (!type already_seen 1026) + (!srcfileloc "ipa-fnsummary.h" 115) + nil ) + (!pair "predicate" + (!type pointer 1192 nil gc_unused + (!type struct 1193 + (!type already_seen 1192) + gc_unused "ipa_predicate" + (!srcfileloc "ipa-fnsummary.h" 117) + (!fields 0 ) + nil 1023 nil nil ) + ) + (!srcfileloc "ipa-fnsummary.h" 117) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + (!srcfileloc "ipa-fnsummary.h" 196) + nil ) + ) + ) + ) + (!srcfileloc "ipa-fnsummary.h" 196) + nil ) + (!pair "loop_strides" + (!type already_seen 1189) + (!srcfileloc "ipa-fnsummary.h" 198) + nil ) + (!pair "builtin_constant_p_parms" + (!type user_struct 1194 nil gc_unused "vec" + (!srcfileloc "ipa-fnsummary.h" 200) + (!fields 3 + (!pair "vl_ptr" + (!type already_seen 826) + (!srcfileloc "ipa-fnsummary.h" 200) + nil ) + (!pair "va_heap" + (!type already_seen 827) + (!srcfileloc "ipa-fnsummary.h" 200) + nil ) + (!pair "int" + (!type already_seen 373) + (!srcfileloc "ipa-fnsummary.h" 200) + nil ) + ) + ) + (!srcfileloc "ipa-fnsummary.h" 200) + (!options + (!option skip string "") + ) + ) + (!pair 
"growth" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 206) + nil ) + (!pair "scc_no" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 208) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1195 nil gc_unused "edge_clone_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1196 nil gc_unused "edge_clone_summary_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1197 nil gc_unused "gather_other_count_struct"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1198 nil gc_unused "desc_incoming_count_struct"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1199 nil gc_unused "symbol_and_index_together"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1200 nil gc_unused "pass_ipa_cp"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1201 nil gc_unused "ipa_dfs_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1202 + (!type pointer 1203 nil gc_used + (!type already_seen 1202) + ) + gc_pointed_to "odr_type_d" + (!srcfileloc "ipa-devirt.cc" 227) + (!fields 11 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ipa-devirt.cc" 203) + nil ) + (!pair "bases" + (!type user_struct 1204 nil gc_unused "vec" + (!srcfileloc "ipa-devirt.cc" 205) + (!fields 1 + (!pair "odr_type" + (!type already_seen 1203) + (!srcfileloc "ipa-devirt.cc" 205) + nil ) + ) + ) + (!srcfileloc "ipa-devirt.cc" 205) + (!options + (!option skip string "") + ) + ) + (!pair "derived_types" + (!type already_seen 1204) + (!srcfileloc "ipa-devirt.cc" 208) + (!options + (!option skip string "") + ) + ) + (!pair "types" + (!type already_seen 84) + (!srcfileloc "ipa-devirt.cc" 211) + nil ) + (!pair "types_set" + (!type already_seen 468) + (!srcfileloc "ipa-devirt.cc" 213) + (!options + (!option skip string "") + ) + ) + (!pair "id" + (!type already_seen 2) + (!srcfileloc "ipa-devirt.cc" 216) + nil ) + (!pair "anonymous_namespace" + (!type already_seen 2) + (!srcfileloc "ipa-devirt.cc" 218) + nil ) + (!pair "all_derivations_known" + (!type 
already_seen 2) + (!srcfileloc "ipa-devirt.cc" 220) + nil ) + (!pair "odr_violated" + (!type already_seen 2) + (!srcfileloc "ipa-devirt.cc" 222) + nil ) + (!pair "rtti_broken" + (!type already_seen 2) + (!srcfileloc "ipa-devirt.cc" 224) + nil ) + (!pair "tbaa_enabled" + (!type already_seen 2) + (!srcfileloc "ipa-devirt.cc" 226) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 1075) + + (!type already_seen 1074) + + (!type struct 1205 nil gc_unused "ipa_param_body_replacement"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1206 nil gc_unused "ipa_param_body_adjustments"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1207 + (!type pointer 1208 nil gc_used + (!type already_seen 1207) + ) + gc_pointed_to "param_access" + (!srcfileloc "ipa-sra.cc" 125) + (!fields 6 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ipa-sra.cc" 108) + nil ) + (!pair "alias_ptr_type" + (!type already_seen 23) + (!srcfileloc "ipa-sra.cc" 112) + nil ) + (!pair "unit_offset" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 116) + nil ) + (!pair "unit_size" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 117) + nil ) + (!pair "certain" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 122) + nil ) + (!pair "reverse" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 124) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1209 nil gc_unused "gensum_param_access"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1210 + (!type pointer 1211 nil gc_used + (!type already_seen 1210) + ) + gc_pointed_to "vec" + (!srcfileloc "ipa-sra.cc" 167) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-sra.cc" 167) + nil ) + (!pair "param_access" + (!type already_seen 1208) + (!srcfileloc "ipa-sra.cc" 167) + nil ) + ) + ) + + (!type struct 1212 nil gc_used "isra_param_desc" + (!srcfileloc "ipa-sra.cc" 256) + (!fields 6 + (!pair "accesses" + (!type already_seen 1211) + (!srcfileloc "ipa-sra.cc" 167) + nil ) + (!pair "param_size_limit" + 
(!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 170) + nil ) + (!pair "size_reached" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 172) + nil ) + (!pair "locally_unused" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 176) + nil ) + (!pair "split_candidate" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 178) + nil ) + (!pair "by_ref" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 180) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1213 nil gc_unused "gensum_param_desc"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1214 nil gc_pointed_to "isra_func_summary" + (!srcfileloc "ipa-sra.cc" 274) + (!fields 5 + (!pair "m_parameters" + (!type pointer 1215 nil gc_used + (!type user_struct 1216 + (!type already_seen 1215) + gc_pointed_to "vec" + (!srcfileloc "ipa-sra.cc" 256) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-sra.cc" 256) + nil ) + (!pair "isra_param_desc" + (!type already_seen 1212) + (!srcfileloc "ipa-sra.cc" 256) + nil ) + ) + ) + ) + (!srcfileloc "ipa-sra.cc" 256) + nil ) + (!pair "m_candidate" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 260) + nil ) + (!pair "m_returns_value" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 263) + nil ) + (!pair "m_return_ignored" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 268) + nil ) + (!pair "m_queued" + (!type already_seen 2) + (!srcfileloc "ipa-sra.cc" 273) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type already_seen 1216) + + (!type struct 1217 nil gc_unused "isra_param_flow"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1218 nil gc_unused "isra_call_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1219 + (!type pointer 1220 nil gc_used + (!type already_seen 1219) + ) + gc_pointed_to "ipa_sra_function_summaries" + (!srcfileloc "ipa-sra.cc" 383) + (!fields 0 ) + ) + + (!type struct 1221 nil gc_unused "ipa_sra_call_summaries"nil + (!fields 0 ) + nil 0 nil nil ) + + 
(!type struct 1222 nil gc_unused "scan_call_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1223 nil gc_unused "caller_issues"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1224 nil gc_unused "pass_ipa_sra"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1225 + (!type pointer 1226 nil gc_used + (!type already_seen 1225) + ) + gc_pointed_to "typeinfo" + (!srcfileloc "dbxout.cc" 174) + (!fields 3 + (!pair "status" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 171) + nil ) + (!pair "file_number" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 172) + nil ) + (!pair "type_number" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 173) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1227 nil gc_unused "dbx_file"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1228 + (!type pointer 1229 nil gc_used + (!type already_seen 1228) + ) + gc_pointed_to "modref_tree" + (!srcfileloc "ipa-modref.h" 23) + (!fields 1 + (!pair "alias_set_type" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 23) + nil ) + ) + ) + + (!type user_struct 1230 nil gc_unused "auto_vec" + (!srcfileloc "ipa-modref.h" 33) + (!fields 1 + (!pair "modref_access_node" + (!type struct 1231 nil gc_unused "modref_access_node" + (!srcfileloc "ipa-modref-tree.h" 138) + (!fields 7 + (!pair "offset" + (!type already_seen 2) + (!srcfileloc "ipa-modref-tree.h" 67) + nil ) + (!pair "size" + (!type already_seen 2) + (!srcfileloc "ipa-modref-tree.h" 68) + nil ) + (!pair "max_size" + (!type already_seen 2) + (!srcfileloc "ipa-modref-tree.h" 69) + nil ) + (!pair "parm_offset" + (!type already_seen 2) + (!srcfileloc "ipa-modref-tree.h" 72) + nil ) + (!pair "parm_index" + (!type already_seen 2) + (!srcfileloc "ipa-modref-tree.h" 76) + nil ) + (!pair "parm_offset_known" + (!type already_seen 2) + (!srcfileloc "ipa-modref-tree.h" 77) + nil ) + (!pair "adjustments" + (!type already_seen 8) + (!srcfileloc "ipa-modref-tree.h" 80) + nil ) + ) + nil 1023 nil nil ) + (!srcfileloc 
"ipa-modref.h" 33) + nil ) + ) + ) + + (!type already_seen 1231) + + (!type user_struct 1232 nil gc_unused "auto_vec" + (!srcfileloc "ipa-modref.h" 34) + (!fields 1 + (!pair "eaf_flags_t" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 34) + nil ) + ) + ) + + (!type struct 1233 + (!type pointer 1234 nil gc_used + (!type already_seen 1233) + ) + gc_pointed_to "modref_summary" + (!srcfileloc "ipa-modref.h" 72) + (!fields 14 + (!pair "loads" + (!type already_seen 1229) + (!srcfileloc "ipa-modref.h" 31) + nil ) + (!pair "stores" + (!type already_seen 1229) + (!srcfileloc "ipa-modref.h" 32) + nil ) + (!pair "kills" + (!type already_seen 1230) + (!srcfileloc "ipa-modref.h" 33) + (!options + (!option skip string "") + ) + ) + (!pair "arg_flags" + (!type already_seen 1232) + (!srcfileloc "ipa-modref.h" 34) + (!options + (!option skip string "") + ) + ) + (!pair "retslot_flags" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 36) + nil ) + (!pair "static_chain_flags" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 37) + nil ) + (!pair "writes_errno" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 39) + nil ) + (!pair "side_effects" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 42) + nil ) + (!pair "nondeterministic" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 45) + nil ) + (!pair "calls_interposable" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 52) + nil ) + (!pair "load_accesses" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 57) + nil ) + (!pair "global_memory_read" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 62) + nil ) + (!pair "global_memory_written" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 63) + nil ) + (!pair "try_dse" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 64) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1235 nil gc_unused "fnspec_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1236 nil gc_unused "fnspec_summaries_t"nil + (!fields 0 ) + nil 
0 nil nil ) + + (!type struct 1237 nil gc_unused "escape_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1238 nil gc_unused "escape_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1239 nil gc_unused "escape_summaries_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1240 nil gc_unused "modref_summaries" + (!srcfileloc "ipa-modref.cc" 230) + (!fields 0 ) + ) + + (!type struct 1241 + (!type pointer 1242 nil gc_used + (!type already_seen 1241) + ) + gc_pointed_to "modref_summary_lto" + (!srcfileloc "ipa-modref.cc" 372) + (!fields 10 + (!pair "loads" + (!type pointer 1243 nil gc_used + (!type user_struct 1244 + (!type already_seen 1243) + gc_pointed_to "modref_tree" + (!srcfileloc "ipa-modref.cc" 350) + (!fields 1 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "ipa-modref.cc" 350) + nil ) + ) + ) + ) + (!srcfileloc "ipa-modref.cc" 357) + nil ) + (!pair "stores" + (!type already_seen 1243) + (!srcfileloc "ipa-modref.cc" 358) + nil ) + (!pair "kills" + (!type already_seen 1230) + (!srcfileloc "ipa-modref.cc" 359) + (!options + (!option skip string "") + ) + ) + (!pair "arg_flags" + (!type already_seen 1232) + (!srcfileloc "ipa-modref.cc" 360) + (!options + (!option skip string "") + ) + ) + (!pair "retslot_flags" + (!type already_seen 2) + (!srcfileloc "ipa-modref.cc" 361) + nil ) + (!pair "static_chain_flags" + (!type already_seen 2) + (!srcfileloc "ipa-modref.cc" 362) + nil ) + (!pair "writes_errno" + (!type already_seen 2) + (!srcfileloc "ipa-modref.cc" 363) + nil ) + (!pair "side_effects" + (!type already_seen 2) + (!srcfileloc "ipa-modref.cc" 364) + nil ) + (!pair "nondeterministic" + (!type already_seen 2) + (!srcfileloc "ipa-modref.cc" 365) + nil ) + (!pair "calls_interposable" + (!type already_seen 2) + (!srcfileloc "ipa-modref.cc" 366) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1245 nil gc_unused "modref_summaries_lto" + (!srcfileloc "ipa-modref.cc" 255) + (!fields 0 ) + ) + + (!type user_struct 1246 
+ (!type pointer 1247 nil gc_used + (!type already_seen 1246) + ) + gc_pointed_to "fast_function_summary" + (!srcfileloc "ipa-modref.cc" 260) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-modref.cc" 260) + nil ) + (!pair "modref_summary" + (!type already_seen 1234) + (!srcfileloc "ipa-modref.cc" 260) + nil ) + ) + ) + + (!type user_struct 1248 + (!type pointer 1249 nil gc_used + (!type already_seen 1248) + ) + gc_pointed_to "fast_function_summary" + (!srcfileloc "ipa-modref.cc" 272) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-modref.cc" 272) + nil ) + (!pair "modref_summary_lto" + (!type already_seen 1242) + (!srcfileloc "ipa-modref.cc" 272) + nil ) + ) + ) + + (!type already_seen 1244) + + (!type struct 1250 nil gc_unused "modref_parm_map"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1251 nil gc_unused "modref_access_analysis"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1252 nil gc_unused "escape_point"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1253 nil gc_unused "modref_lattice"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1254 nil gc_unused "modref_eaf_analysis"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1255 nil gc_unused "stack_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1256 nil gc_unused "pass_modref"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1257 nil gc_unused "pass_ipa_modref"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1258 nil gc_unused "ipa_call_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1259 nil gc_unused "escape_map"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1260 nil gc_unused "ipa_modref_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1261 nil gc_unused "modref_ref_node" + (!srcfileloc "ipa-modref-tree.h" 205) + (!fields 0 ) + ) + + (!type user_struct 1262 nil gc_unused "modref_base_node" + (!srcfileloc "ipa-modref-tree.h" 288) + (!fields 0 ) + ) + + 
(!type user_struct 1263 nil gc_unused "modref_tree" + (!srcfileloc "ipa-modref-tree.h" 738) + (!fields 0 ) + ) + + (!type struct 1264 nil gc_used "nowarn_spec_t" + (!srcfileloc "diagnostic-spec.h" 135) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1265 + (!type pointer 1266 nil gc_used + (!type already_seen 1265) + ) + gc_pointed_to "hash_map" + (!srcfileloc "diagnostic-spec.h" 135) + (!fields 2 + (!pair "nowarn_spec_t" + (!type already_seen 1264) + (!srcfileloc "diagnostic-spec.h" 135) + nil ) + (!pair "location_hash" + (!type already_seen 732) + (!srcfileloc "diagnostic-spec.h" 135) + nil ) + ) + ) + + (!type already_seen 479) + + (!type already_seen 475) + + (!type already_seen 478) + + (!type already_seen 486) + + (!type already_seen 511) + + (!type already_seen 513) + + (!type already_seen 476) + + (!type already_seen 473) + + (!type already_seen 514) + + (!type already_seen 500) + + (!type already_seen 509) + + (!type already_seen 508) + + (!type already_seen 482) + + (!type already_seen 501) + + (!type already_seen 503) + + (!type already_seen 505) + + (!type already_seen 507) + + (!type already_seen 484) + + (!type struct 1267 nil gc_unused "array_descr_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1268 nil gc_unused "fixed_point_type_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 495) + + (!type user_struct 1269 + (!type pointer 1270 nil gc_used + (!type already_seen 1269) + ) + gc_pointed_to "hash_map" + (!srcfileloc "dwarf2asm.cc" 911) + (!fields 1 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "dwarf2asm.cc" 911) + nil ) + ) + ) + + (!type struct 1271 + (!type pointer 1272 nil gc_used + (!type already_seen 1271) + ) + gc_pointed_to "dw_cfi_row" + (!srcfileloc "dwarf2cfi.cc" 193) + (!fields 5 + (!pair "cfa" + (!type already_seen 513) + (!srcfileloc "dwarf2cfi.cc" 66) + nil ) + (!pair "cfa_cfi" + (!type already_seen 474) + (!srcfileloc "dwarf2cfi.cc" 67) + nil ) + (!pair "reg_save" + (!type 
already_seen 472) + (!srcfileloc "dwarf2cfi.cc" 70) + nil ) + (!pair "window_save" + (!type already_seen 2) + (!srcfileloc "dwarf2cfi.cc" 73) + nil ) + (!pair "ra_mangled" + (!type already_seen 2) + (!srcfileloc "dwarf2cfi.cc" 76) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1273 + (!type pointer 1274 nil gc_used + (!type already_seen 1273) + ) + gc_pointed_to "reg_saved_in_data" + (!srcfileloc "dwarf2cfi.cc" 195) + (!fields 2 + (!pair "orig_reg" + (!type already_seen 99) + (!srcfileloc "dwarf2cfi.cc" 81) + nil ) + (!pair "saved_in_reg" + (!type already_seen 99) + (!srcfileloc "dwarf2cfi.cc" 82) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1275 nil gc_unused "dw_trace_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1276 nil gc_unused "trace_info_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1277 nil gc_unused "queued_reg_save"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1278 nil gc_unused "init_one_dwarf_reg_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1279 nil gc_unused "pass_dwarf2_frame"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1280 + (!type pointer 1281 nil gc_used + (!type already_seen 1280) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 215) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 215) + nil ) + (!pair "dw_fde_ref" + (!type already_seen 470) + (!srcfileloc "dwarf2out.cc" 215) + nil ) + ) + ) + + (!type struct 1282 nil gc_used "indirect_string_hasher" + (!srcfileloc "dwarf2out.cc" 233) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1283 + (!type pointer 1284 nil gc_used + (!type already_seen 1283) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 233) + (!fields 1 + (!pair "indirect_string_hasher" + (!type already_seen 1282) + (!srcfileloc "dwarf2out.cc" 233) + nil ) + ) + ) + + (!type user_struct 1285 + (!type pointer 1286 nil gc_used + (!type already_seen 1285) + ) + gc_pointed_to "vec" + 
(!srcfileloc "dwarf2out.cc" 275) + (!fields 1 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 275) + nil ) + ) + ) + + (!type already_seen 483) + + (!type already_seen 491) + + (!type struct 1287 nil gc_used "dw_line_info_struct" + (!srcfileloc "dwarf2out.cc" 3081) + (!fields 2 + (!pair "opcode" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3079) + nil ) + (!pair "val" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3080) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1288 + (!type pointer 1289 nil gc_used + (!type already_seen 1288) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3123) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3123) + nil ) + (!pair "dw_line_info_entry" + (!type already_seen 1287) + (!srcfileloc "dwarf2out.cc" 3123) + nil ) + ) + ) + + (!type struct 1290 + (!type pointer 1291 nil gc_used + (!type already_seen 1290) + ) + gc_pointed_to "dw_line_info_table" + (!srcfileloc "dwarf2out.cc" 3654) + (!fields 10 + (!pair "end_label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3086) + nil ) + (!pair "file_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3090) + nil ) + (!pair "line_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3091) + nil ) + (!pair "column_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3092) + nil ) + (!pair "discrim_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3093) + nil ) + (!pair "is_stmt" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3094) + nil ) + (!pair "in_use" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3095) + nil ) + (!pair "view" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3111) + nil ) + (!pair "symviews_since_reset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3116) + nil ) + (!pair "entries" + (!type already_seen 1289) + (!srcfileloc "dwarf2out.cc" 3123) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 489) + + 
(!type already_seen 494) + + (!type struct 1292 nil gc_unused "set_early_dwarf"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1293 nil gc_used "pubname_struct" + (!srcfileloc "dwarf2out.cc" 3228) + (!fields 2 + (!pair "die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3226) + nil ) + (!pair "name" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3227) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1294 nil gc_used "dw_ranges" + (!srcfileloc "dwarf2out.cc" 3687) + (!fields 6 + (!pair "label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3233) + nil ) + (!pair "num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3236) + nil ) + (!pair "idx" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3242) + nil ) + (!pair "maybe_new_sec" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3245) + nil ) + (!pair "begin_entry" + (!type already_seen 481) + (!srcfileloc "dwarf2out.cc" 3246) + nil ) + (!pair "end_entry" + (!type already_seen 481) + (!srcfileloc "dwarf2out.cc" 3247) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1295 nil gc_used "macinfo_struct" + (!srcfileloc "dwarf2out.cc" 3256) + (!fields 3 + (!pair "code" + (!type already_seen 8) + (!srcfileloc "dwarf2out.cc" 3253) + nil ) + (!pair "lineno" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3254) + nil ) + (!pair "info" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3255) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1296 nil gc_used "dw_ranges_by_label" + (!srcfileloc "dwarf2out.cc" 3690) + (!fields 2 + (!pair "begin" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3261) + nil ) + (!pair "end" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3262) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1297 + (!type pointer 1298 nil gc_used + (!type already_seen 1297) + ) + gc_pointed_to "limbo_die_struct" + (!srcfileloc "dwarf2out.cc" 3284) + (!fields 3 + (!pair "die" + (!type already_seen 487) + (!srcfileloc 
"dwarf2out.cc" 3279) + nil ) + (!pair "created_for" + (!type already_seen 23) + (!srcfileloc "dwarf2out.cc" 3282) + nil ) + (!pair "next" + (!type already_seen 1298) + (!srcfileloc "dwarf2out.cc" 3283) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1299 nil gc_unused "skeleton_chain_struct"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1300 nil gc_used "dwarf_file_hasher" + (!srcfileloc "dwarf2out.cc" 3501) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1301 + (!type pointer 1302 nil gc_used + (!type already_seen 1301) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 3501) + (!fields 1 + (!pair "dwarf_file_hasher" + (!type already_seen 1300) + (!srcfileloc "dwarf2out.cc" 3501) + nil ) + ) + ) + + (!type struct 1303 nil gc_used "decl_die_hasher" + (!srcfileloc "dwarf2out.cc" 3512) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1304 + (!type pointer 1305 nil gc_used + (!type already_seen 1304) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 3512) + (!fields 1 + (!pair "decl_die_hasher" + (!type already_seen 1303) + (!srcfileloc "dwarf2out.cc" 3512) + nil ) + ) + ) + + (!type user_struct 1306 + (!type pointer 1307 nil gc_used + (!type already_seen 1306) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3516) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3516) + nil ) + (!pair "dw_die_ref" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3516) + nil ) + ) + ) + + (!type struct 1308 nil gc_pointed_to "variable_value_struct" + (!srcfileloc "dwarf2out.cc" 3517) + (!fields 2 + (!pair "decl_id" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3515) + nil ) + (!pair "dies" + (!type already_seen 1307) + (!srcfileloc "dwarf2out.cc" 3516) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1309 nil gc_used "variable_value_hasher" + (!srcfileloc "dwarf2out.cc" 3529) + (!fields 0 ) + nil 1023 nil nil ) + + (!type 
user_struct 1310 + (!type pointer 1311 nil gc_used + (!type already_seen 1310) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 3529) + (!fields 1 + (!pair "variable_value_hasher" + (!type already_seen 1309) + (!srcfileloc "dwarf2out.cc" 3529) + nil ) + ) + ) + + (!type struct 1312 nil gc_used "block_die_hasher" + (!srcfileloc "dwarf2out.cc" 3539) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1313 + (!type pointer 1314 nil gc_used + (!type already_seen 1313) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 3539) + (!fields 1 + (!pair "block_die_hasher" + (!type already_seen 1312) + (!srcfileloc "dwarf2out.cc" 3539) + nil ) + ) + ) + + (!type struct 1315 nil gc_used "die_arg_entry_struct" + (!srcfileloc "dwarf2out.cc" 3544) + (!fields 2 + (!pair "die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3542) + nil ) + (!pair "arg" + (!type already_seen 23) + (!srcfileloc "dwarf2out.cc" 3543) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1316 + (!type pointer 1317 nil gc_used + (!type already_seen 1316) + ) + gc_pointed_to "var_loc_node" + (!srcfileloc "dwarf2out.cc" 3561) + (!fields 4 + (!pair "loc" + (!type already_seen 99) + (!srcfileloc "dwarf2out.cc" 3557) + nil ) + (!pair "label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3558) + nil ) + (!pair "next" + (!type already_seen 1317) + (!srcfileloc "dwarf2out.cc" 3559) + nil ) + (!pair "view" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3560) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + + (!type struct 1318 nil gc_pointed_to "var_loc_list_def" + (!srcfileloc "dwarf2out.cc" 3582) + (!fields 4 + (!pair "first" + (!type already_seen 1317) + (!srcfileloc "dwarf2out.cc" 3565) + nil ) + (!pair "last" + (!type already_seen 1317) + (!srcfileloc "dwarf2out.cc" 3573) + (!options + (!option skip string "%h") + ) + ) + (!pair "last_before_switch" + (!type already_seen 1317) + (!srcfileloc "dwarf2out.cc" 3578) + 
(!options + (!option skip string "%h") + ) + ) + (!pair "decl_id" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3581) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1319 + (!type pointer 1320 nil gc_used + (!type already_seen 1319) + ) + gc_pointed_to "call_arg_loc_node" + (!srcfileloc "dwarf2out.cc" 3593) + (!fields 6 + (!pair "call_arg_loc_note" + (!type already_seen 99) + (!srcfileloc "dwarf2out.cc" 3587) + nil ) + (!pair "label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 3588) + nil ) + (!pair "block" + (!type already_seen 23) + (!srcfileloc "dwarf2out.cc" 3589) + nil ) + (!pair "tail_call_p" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3590) + nil ) + (!pair "symbol_ref" + (!type already_seen 99) + (!srcfileloc "dwarf2out.cc" 3591) + nil ) + (!pair "next" + (!type already_seen 1320) + (!srcfileloc "dwarf2out.cc" 3592) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1023 nil nil ) + + (!type struct 1321 nil gc_used "decl_loc_hasher" + (!srcfileloc "dwarf2out.cc" 3605) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1322 + (!type pointer 1323 nil gc_used + (!type already_seen 1322) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 3605) + (!fields 1 + (!pair "decl_loc_hasher" + (!type already_seen 1321) + (!srcfileloc "dwarf2out.cc" 3605) + nil ) + ) + ) + + (!type struct 1324 nil gc_pointed_to "cached_dw_loc_list_def" + (!srcfileloc "dwarf2out.cc" 3623) + (!fields 2 + (!pair "decl_id" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3619) + nil ) + (!pair "loc_list" + (!type already_seen 485) + (!srcfileloc "dwarf2out.cc" 3622) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1325 nil gc_used "dw_loc_list_hasher" + (!srcfileloc "dwarf2out.cc" 3636) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1326 + (!type pointer 1327 nil gc_used + (!type already_seen 1326) + ) + 
gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 3636) + (!fields 1 + (!pair "dw_loc_list_hasher" + (!type already_seen 1325) + (!srcfileloc "dwarf2out.cc" 3636) + nil ) + ) + ) + + (!type user_struct 1328 + (!type pointer 1329 nil gc_used + (!type already_seen 1328) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3661) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3661) + nil ) + (!pair "dw_line_info_table" + (!type already_seen 1291) + (!srcfileloc "dwarf2out.cc" 3661) + nil ) + ) + ) + + (!type user_struct 1330 + (!type pointer 1331 nil gc_used + (!type already_seen 1330) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3669) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3669) + nil ) + (!pair "pubname_entry" + (!type already_seen 1293) + (!srcfileloc "dwarf2out.cc" 3669) + nil ) + ) + ) + + (!type user_struct 1332 + (!type pointer 1333 nil gc_used + (!type already_seen 1332) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3677) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3677) + nil ) + (!pair "macinfo_entry" + (!type already_seen 1295) + (!srcfileloc "dwarf2out.cc" 3677) + nil ) + ) + ) + + (!type user_struct 1334 + (!type pointer 1335 nil gc_used + (!type already_seen 1334) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3687) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3687) + nil ) + (!pair "dw_ranges" + (!type already_seen 1294) + (!srcfileloc "dwarf2out.cc" 3687) + nil ) + ) + ) + + (!type user_struct 1336 + (!type pointer 1337 nil gc_used + (!type already_seen 1336) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3690) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3690) + nil ) + (!pair "dw_ranges_by_label" + (!type already_seen 1296) + (!srcfileloc "dwarf2out.cc" 3690) + nil ) + ) + ) + + (!type user_struct 
1338 + (!type pointer 1339 nil gc_used + (!type already_seen 1338) + ) + gc_pointed_to "vec" + (!srcfileloc "dwarf2out.cc" 3707) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "dwarf2out.cc" 3707) + nil ) + (!pair "die_arg_entry" + (!type already_seen 1315) + (!srcfileloc "dwarf2out.cc" 3707) + nil ) + ) + ) + + (!type struct 1340 nil gc_unused "md5_ctx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1341 nil gc_unused "checksum_attributes"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1342 nil gc_unused "loc_descr_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1343 nil gc_unused "vlr_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1344 nil gc_used "addr_hasher" + (!srcfileloc "dwarf2out.cc" 5072) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1345 + (!type pointer 1346 nil gc_used + (!type already_seen 1345) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 5072) + (!fields 1 + (!pair "addr_hasher" + (!type already_seen 1344) + (!srcfileloc "dwarf2out.cc" 5072) + nil ) + ) + ) + + (!type struct 1347 nil gc_used "sym_off_pair" + (!srcfileloc "dwarf2out.cc" 5938) + (!fields 2 + (!pair "sym" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 5935) + (!options + (!option skip string "") + ) + ) + (!pair "off" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 5936) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1348 + (!type pointer 1349 nil gc_used + (!type already_seen 1348) + ) + gc_pointed_to "hash_map" + (!srcfileloc "dwarf2out.cc" 5938) + (!fields 2 + (!pair "sym_off_pair" + (!type already_seen 1347) + (!srcfileloc "dwarf2out.cc" 5938) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "dwarf2out.cc" 5938) + nil ) + ) + ) + + (!type struct 1350 nil gc_unused "decl_table_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1351 nil gc_unused "decl_table_entry_hasher" + (!srcfileloc "dwarf2out.cc" 8326) + (!fields 0 ) + nil 1023 
nil nil ) + + (!type user_struct 1352 nil gc_unused "hash_table" + (!srcfileloc "dwarf2out.cc" 8326) + (!fields 1 + (!pair "decl_table_entry_hasher" + (!type already_seen 1351) + (!srcfileloc "dwarf2out.cc" 8326) + nil ) + ) + ) + + (!type struct 1353 nil gc_unused "external_ref"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1354 nil gc_unused "external_ref_hasher" + (!srcfileloc "dwarf2out.cc" 9084) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1355 nil gc_unused "hash_table" + (!srcfileloc "dwarf2out.cc" 9084) + (!fields 1 + (!pair "external_ref_hasher" + (!type already_seen 1354) + (!srcfileloc "dwarf2out.cc" 9084) + nil ) + ) + ) + + (!type struct 1356 nil gc_unused "file_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1357 nil gc_unused "dir_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1358 nil gc_unused "file_name_acquire_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1359 nil gc_unused "dwarf_qual_info_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1360 nil gc_unused "dwarf_procedure_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1361 nil gc_pointed_to "inline_entry_data" + (!srcfileloc "dwarf2out.cc" 24267) + (!fields 4 + (!pair "block" + (!type already_seen 23) + (!srcfileloc "dwarf2out.cc" 24259) + nil ) + (!pair "label_pfx" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 24262) + nil ) + (!pair "label_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 24263) + nil ) + (!pair "view" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 24266) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1362 nil gc_used "inline_entry_data_hasher" + (!srcfileloc "dwarf2out.cc" 24293) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1363 + (!type pointer 1364 nil gc_used + (!type already_seen 1363) + ) + gc_pointed_to "hash_table" + (!srcfileloc "dwarf2out.cc" 24293) + (!fields 1 + (!pair "inline_entry_data_hasher" 
+ (!type already_seen 1362) + (!srcfileloc "dwarf2out.cc" 24293) + nil ) + ) + ) + + (!type struct 1365 nil gc_unused "macinfo_entry_hasher" + (!srcfileloc "dwarf2out.cc" 28847) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1366 nil gc_unused "hash_table" + (!srcfileloc "dwarf2out.cc" 28847) + (!fields 1 + (!pair "macinfo_entry_hasher" + (!type already_seen 1365) + (!srcfileloc "dwarf2out.cc" 28847) + nil ) + ) + ) + + (!type struct 1367 nil gc_unused "comdat_type_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1368 nil gc_unused "loc_list_hasher" + (!srcfileloc "dwarf2out.cc" 31902) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1369 nil gc_unused "hash_table" + (!srcfileloc "dwarf2out.cc" 31902) + (!fields 1 + (!pair "loc_list_hasher" + (!type already_seen 1368) + (!srcfileloc "dwarf2out.cc" 31902) + nil ) + ) + ) + + (!type struct 1370 + (!type pointer 1371 nil gc_used + (!type already_seen 1370) + ) + gc_pointed_to "ctf_string" + (!srcfileloc "ctfc.h" 57) + (!fields 2 + (!pair "cts_str" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 55) + nil ) + (!pair "cts_next" + (!type already_seen 1371) + (!srcfileloc "ctfc.h" 56) + nil ) + ) + (!options + (!option chain_next string "%h.cts_next") + ) + 1023 nil nil ) + + (!type struct 1372 nil gc_used "ctf_strtable" + (!srcfileloc "ctfc.h" 68) + (!fields 5 + (!pair "ctstab_head" + (!type already_seen 1371) + (!srcfileloc "ctfc.h" 63) + nil ) + (!pair "ctstab_tail" + (!type already_seen 1371) + (!srcfileloc "ctfc.h" 64) + nil ) + (!pair "ctstab_num" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 65) + nil ) + (!pair "ctstab_len" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 66) + nil ) + (!pair "ctstab_estr" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 67) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1373 nil gc_used "ctf_encoding" + (!srcfileloc "ctfc.h" 78) + (!fields 3 + (!pair "cte_format" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 75) + nil ) + (!pair 
"cte_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 76) + nil ) + (!pair "cte_bits" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 77) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1374 nil gc_used "ctf_arinfo" + (!srcfileloc "ctfc.h" 87) + (!fields 3 + (!pair "ctr_contents" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 84) + nil ) + (!pair "ctr_index" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 85) + nil ) + (!pair "ctr_nelems" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 86) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1375 nil gc_unused "ctf_funcinfo" + (!srcfileloc "ctfc.h" 96) + (!fields 3 + (!pair "ctc_return" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 93) + nil ) + (!pair "ctc_argc" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 94) + nil ) + (!pair "ctc_flags" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 95) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1376 nil gc_used "ctf_sliceinfo" + (!srcfileloc "ctfc.h" 103) + (!fields 3 + (!pair "cts_type" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 100) + nil ) + (!pair "cts_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 101) + nil ) + (!pair "cts_bits" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 102) + nil ) + ) + nil 1023 nil nil ) + + (!type union 1377 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/ctfc.h:113" + (!srcfileloc "ctfc.h" 116) + (!fields 2 + (!pair "_size" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 114) + (!options + (!option tag string "0") + ) + ) + (!pair "_type" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 115) + (!options + (!option tag string "1") + ) + ) + ) + (!options + (!option desc string "0") + ) + 1023 nil ) + + (!type struct 1378 nil gc_used "ctf_itype" + (!srcfileloc "ctfc.h" 119) + (!fields 5 + (!pair "ctti_name" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 110) + nil ) + (!pair "ctti_info" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 111) + nil ) + (!pair "_u" + 
(!type already_seen 1377) + (!srcfileloc "ctfc.h" 116) + nil ) + (!pair "ctti_lsizehi" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 117) + nil ) + (!pair "ctti_lsizelo" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 118) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1379 + (!type pointer 1380 nil gc_used + (!type already_seen 1379) + ) + gc_pointed_to "ctf_dmdef" + (!srcfileloc "ctfc.h" 138) + (!fields 6 + (!pair "dmd_name" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 132) + nil ) + (!pair "dmd_type" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 133) + nil ) + (!pair "dmd_name_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 134) + nil ) + (!pair "dmd_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 135) + nil ) + (!pair "dmd_value" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 136) + nil ) + (!pair "dmd_next" + (!type already_seen 1380) + (!srcfileloc "ctfc.h" 137) + nil ) + ) + (!options + (!option chain_next string "%h.dmd_next") + ) + 1023 nil nil ) + + (!type struct 1381 + (!type pointer 1382 nil gc_used + (!type already_seen 1381) + ) + gc_pointed_to "ctf_func_arg" + (!srcfileloc "ctfc.h" 150) + (!fields 4 + (!pair "farg_type" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 146) + nil ) + (!pair "farg_name" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 147) + nil ) + (!pair "farg_name_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 148) + nil ) + (!pair "farg_next" + (!type already_seen 1382) + (!srcfileloc "ctfc.h" 149) + nil ) + ) + nil 1023 nil nil ) + + (!type union 1383 nil gc_used "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/ctfc.h:165" + (!srcfileloc "ctfc.h" 176) + (!fields 5 + (!pair "dtu_members" + (!type already_seen 1380) + (!srcfileloc "ctfc.h" 167) + (!options + (!option tag string "CTF_DTU_D_MEMBERS") + ) + ) + (!pair "dtu_arr" + (!type already_seen 1374) + (!srcfileloc "ctfc.h" 169) + (!options + (!option tag string "CTF_DTU_D_ARRAY") + ) + ) + (!pair "dtu_enc" + (!type 
already_seen 1373) + (!srcfileloc "ctfc.h" 171) + (!options + (!option tag string "CTF_DTU_D_ENCODING") + ) + ) + (!pair "dtu_argv" + (!type already_seen 1382) + (!srcfileloc "ctfc.h" 173) + (!options + (!option tag string "CTF_DTU_D_ARGUMENTS") + ) + ) + (!pair "dtu_slice" + (!type already_seen 1376) + (!srcfileloc "ctfc.h" 175) + (!options + (!option tag string "CTF_DTU_D_SLICE") + ) + ) + ) + (!options + (!option desc string "ctf_dtu_d_union_selector (&%1)") + ) + 1023 nil ) + + (!type struct 1384 + (!type pointer 1385 + (!type pointer 1386 nil gc_unused + (!type already_seen 1385) + ) + gc_used + (!type already_seen 1384) + ) + gc_pointed_to "ctf_dtdef" + (!srcfileloc "ctfc.h" 177) + (!fields 6 + (!pair "dtd_key" + (!type already_seen 487) + (!srcfileloc "ctfc.h" 158) + nil ) + (!pair "dtd_name" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 159) + nil ) + (!pair "dtd_type" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 160) + nil ) + (!pair "dtd_data" + (!type already_seen 1378) + (!srcfileloc "ctfc.h" 161) + nil ) + (!pair "from_global_func" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 162) + nil ) + (!pair "dtd_u" + (!type already_seen 1383) + (!srcfileloc "ctfc.h" 176) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1387 + (!type pointer 1388 + (!type pointer 1389 nil gc_unused + (!type already_seen 1388) + ) + gc_used + (!type already_seen 1387) + ) + gc_pointed_to "ctf_dvdef" + (!srcfileloc "ctfc.h" 190) + (!fields 5 + (!pair "dvd_key" + (!type already_seen 487) + (!srcfileloc "ctfc.h" 185) + nil ) + (!pair "dvd_name" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 186) + nil ) + (!pair "dvd_name_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 187) + nil ) + (!pair "dvd_visibility" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 188) + nil ) + (!pair "dvd_type" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 189) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) 
+ + (!type struct 1390 + (!type pointer 1391 nil gc_unused + (!type already_seen 1390) + ) + gc_unused "ctf_srcloc" + (!srcfileloc "ctfc.h" 204) + (!fields 3 + (!pair "ctsloc_file" + (!type already_seen 11) + (!srcfileloc "ctfc.h" 201) + nil ) + (!pair "ctsloc_line" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 202) + nil ) + (!pair "ctsloc_col" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 203) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1392 nil gc_used "ctfc_dtd_hasher" + (!srcfileloc "ctfc.h" 274) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1393 nil gc_used "ctfc_dvd_hasher" + (!srcfileloc "ctfc.h" 276) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1394 + (!type pointer 1395 nil gc_used + (!type already_seen 1394) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ctfc.h" 274) + (!fields 1 + (!pair "ctfc_dtd_hasher" + (!type already_seen 1392) + (!srcfileloc "ctfc.h" 274) + nil ) + ) + ) + + (!type user_struct 1396 + (!type pointer 1397 nil gc_used + (!type already_seen 1396) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ctfc.h" 276) + (!fields 1 + (!pair "ctfc_dvd_hasher" + (!type already_seen 1393) + (!srcfileloc "ctfc.h" 276) + nil ) + ) + ) + + (!type struct 1398 + (!type pointer 1399 nil gc_used + (!type already_seen 1398) + ) + gc_pointed_to "ctf_container" + (!srcfileloc "ctfc.h" 332) + (!fields 23 + (!pair "ctfc_magic" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 268) + nil ) + (!pair "ctfc_version" + (!type already_seen 8) + (!srcfileloc "ctfc.h" 269) + nil ) + (!pair "ctfc_flags" + (!type already_seen 8) + (!srcfileloc "ctfc.h" 270) + nil ) + (!pair "ctfc_cuname_offset" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 271) + nil ) + (!pair "ctfc_types" + (!type already_seen 1395) + (!srcfileloc "ctfc.h" 274) + nil ) + (!pair "ctfc_vars" + (!type already_seen 1397) + (!srcfileloc "ctfc.h" 276) + nil ) + (!pair "ctfc_ignore_vars" + (!type already_seen 1397) + (!srcfileloc "ctfc.h" 278) + nil ) + (!pair 
"ctfc_strtable" + (!type already_seen 1372) + (!srcfileloc "ctfc.h" 281) + nil ) + (!pair "ctfc_aux_strtable" + (!type already_seen 1372) + (!srcfileloc "ctfc.h" 284) + nil ) + (!pair "ctfc_num_types" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 286) + nil ) + (!pair "ctfc_num_stypes" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 287) + nil ) + (!pair "ctfc_num_global_funcs" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 288) + nil ) + (!pair "ctfc_num_global_objts" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 289) + nil ) + (!pair "ctfc_num_vlen_bytes" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 294) + nil ) + (!pair "ctfc_nextid" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 297) + nil ) + (!pair "ctfc_vars_list" + (!type already_seen 1389) + (!srcfileloc "ctfc.h" 305) + (!options + (!option length string "0") + ) + ) + (!pair "ctfc_vars_list_count" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 307) + nil ) + (!pair "ctfc_types_list" + (!type already_seen 1386) + (!srcfileloc "ctfc.h" 311) + (!options + (!option length string "0") + ) + ) + (!pair "ctfc_gfuncs_list" + (!type already_seen 1386) + (!srcfileloc "ctfc.h" 315) + (!options + (!option length string "0") + ) + ) + (!pair "ctfc_gobjts_list" + (!type already_seen 1389) + (!srcfileloc "ctfc.h" 318) + (!options + (!option length string "0") + ) + ) + (!pair "ctfc_numbytes_asm" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 326) + nil ) + (!pair "ctfc_strlen" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 328) + nil ) + (!pair "ctfc_aux_strlen" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 330) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1400 nil gc_unused "ctf_dtd_preprocess_arg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1401 nil gc_unused "ctf_dvd_preprocess_arg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1402 + (!type pointer 1403 nil gc_used + (!type already_seen 1402) + ) + gc_pointed_to "hash_map" + (!srcfileloc "btfout.cc" 73) + (!fields 
2 + (!pair "unsigned" + (!type undefined 1404 nil gc_unused "unsigned" + (!srcfileloc "btfout.cc" 73) + ) + (!srcfileloc "btfout.cc" 73) + nil ) + (!pair "ctf_dvdef_ref" + (!type already_seen 1388) + (!srcfileloc "btfout.cc" 73) + nil ) + ) + ) + + (!type already_seen 1404) + + (!type struct 1405 nil gc_unused "btf_datasec"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1406 + (!type pointer 1407 nil gc_used + (!type already_seen 1406) + ) + gc_pointed_to "vec" + (!srcfileloc "btfout.cc" 105) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "btfout.cc" 105) + nil ) + (!pair "ctf_dtdef_ref" + (!type already_seen 1385) + (!srcfileloc "btfout.cc" 105) + nil ) + ) + ) + + (!type struct 1408 nil gc_unused "btf_var_secinfo"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1409 nil gc_unused "pass_lower_vector"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1410 nil gc_unused "pass_lower_vector_ssa"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1411 nil gc_unused "pass_gimple_isel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1412 + (!type pointer 1413 nil gc_used + (!type already_seen 1412) + ) + gc_pointed_to "temp_slot" + (!srcfileloc "function.cc" 591) + (!fields 10 + (!pair "next" + (!type already_seen 1413) + (!srcfileloc "function.cc" 567) + nil ) + (!pair "prev" + (!type already_seen 1413) + (!srcfileloc "function.cc" 569) + nil ) + (!pair "slot" + (!type already_seen 99) + (!srcfileloc "function.cc" 571) + nil ) + (!pair "size" + (!type already_seen 2) + (!srcfileloc "function.cc" 573) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "function.cc" 578) + nil ) + (!pair "align" + (!type already_seen 2) + (!srcfileloc "function.cc" 580) + nil ) + (!pair "in_use" + (!type already_seen 8) + (!srcfileloc "function.cc" 582) + nil ) + (!pair "level" + (!type already_seen 2) + (!srcfileloc "function.cc" 584) + nil ) + (!pair "base_offset" + (!type already_seen 2) + (!srcfileloc "function.cc" 
587) + nil ) + (!pair "full_size" + (!type already_seen 2) + (!srcfileloc "function.cc" 590) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1414 nil gc_used "incoming_args" + (!srcfileloc "emit-rtl.h" 55) + (!fields 6 + (!pair "pops_args" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 33) + nil ) + (!pair "size" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 38) + nil ) + (!pair "pretend_args_size" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 43) + nil ) + (!pair "arg_offset_rtx" + (!type already_seen 99) + (!srcfileloc "emit-rtl.h" 47) + nil ) + (!pair "info" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 51) + nil ) + (!pair "internal_arg_pointer" + (!type already_seen 99) + (!srcfileloc "emit-rtl.h" 54) + nil ) + ) + nil 1023 nil nil ) + + (!type undefined 1415 + (!type pointer 1416 nil gc_unused + (!type already_seen 1415) + ) + gc_unused "rtl_ssa::function_info" + (!srcfileloc "emit-rtl.h" 77) + ) + + (!type struct 1417 + (!type pointer 1418 nil gc_used + (!type already_seen 1417) + ) + gc_pointed_to "initial_value_struct" + (!srcfileloc "function.cc" 1266) + (!fields 3 + (!pair "num_entries" + (!type already_seen 2) + (!srcfileloc "function.cc" 1263) + nil ) + (!pair "max_entries" + (!type already_seen 2) + (!srcfileloc "function.cc" 1264) + nil ) + (!pair "entries" + (!type pointer 1419 nil gc_unused + (!type struct 1420 + (!type already_seen 1419) + gc_used "initial_value_pair" + (!srcfileloc "function.cc" 1265) + (!fields 2 + (!pair "hard_reg" + (!type already_seen 99) + (!srcfileloc "function.cc" 1256) + nil ) + (!pair "pseudo" + (!type already_seen 99) + (!srcfileloc "function.cc" 1257) + nil ) + ) + nil 1023 nil nil ) + ) + (!srcfileloc "function.cc" 1265) + (!options + (!option length string "%h.num_entries") + ) + ) + ) + nil 1023 nil nil ) + + (!type user_struct 1421 + (!type pointer 1422 nil gc_used + (!type already_seen 1421) + ) + gc_pointed_to "vec" + (!srcfileloc "emit-rtl.h" 148) + (!fields 2 + (!pair "va_gc" + 
(!type already_seen 79) + (!srcfileloc "emit-rtl.h" 148) + nil ) + (!pair "temp_slot_p" + (!type already_seen 1413) + (!srcfileloc "emit-rtl.h" 148) + nil ) + ) + ) + + (!type struct 1423 nil gc_used "rtl_data" + (!srcfileloc "emit-rtl.h" 321) + (!fields 66 + (!pair "expr" + (!type already_seen 996) + (!srcfileloc "emit-rtl.h" 62) + nil ) + (!pair "emit" + (!type already_seen 993) + (!srcfileloc "emit-rtl.h" 63) + nil ) + (!pair "varasm" + (!type already_seen 1008) + (!srcfileloc "emit-rtl.h" 64) + nil ) + (!pair "args" + (!type already_seen 1414) + (!srcfileloc "emit-rtl.h" 65) + nil ) + (!pair "subsections" + (!type already_seen 1009) + (!srcfileloc "emit-rtl.h" 66) + nil ) + (!pair "eh" + (!type already_seen 1001) + (!srcfileloc "emit-rtl.h" 67) + nil ) + (!pair "abi" + (!type already_seen 903) + (!srcfileloc "emit-rtl.h" 75) + (!options + (!option skip string "") + ) + ) + (!pair "ssa" + (!type already_seen 1416) + (!srcfileloc "emit-rtl.h" 77) + (!options + (!option skip string "") + ) + ) + (!pair "outgoing_args_size" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 83) + nil ) + (!pair "return_rtx" + (!type already_seen 99) + (!srcfileloc "emit-rtl.h" 89) + nil ) + (!pair "hard_reg_initial_vals" + (!type already_seen 1418) + (!srcfileloc "emit-rtl.h" 96) + nil ) + (!pair "stack_protect_guard" + (!type already_seen 23) + (!srcfileloc "emit-rtl.h" 100) + nil ) + (!pair "stack_protect_guard_decl" + (!type already_seen 23) + (!srcfileloc "emit-rtl.h" 104) + nil ) + (!pair "x_nonlocal_goto_handler_labels" + (!type already_seen 746) + (!srcfileloc "emit-rtl.h" 108) + nil ) + (!pair "x_return_label" + (!type already_seen 365) + (!srcfileloc "emit-rtl.h" 113) + nil ) + (!pair "x_naked_return_label" + (!type already_seen 365) + (!srcfileloc "emit-rtl.h" 118) + nil ) + (!pair "x_stack_slot_list" + (!type already_seen 220) + (!srcfileloc "emit-rtl.h" 122) + nil ) + (!pair "frame_space_list" + (!type already_seen 1011) + (!srcfileloc "emit-rtl.h" 125) + nil ) + 
(!pair "x_stack_check_probe_note" + (!type already_seen 755) + (!srcfileloc "emit-rtl.h" 128) + nil ) + (!pair "x_arg_pointer_save_area" + (!type already_seen 99) + (!srcfileloc "emit-rtl.h" 134) + nil ) + (!pair "drap_reg" + (!type already_seen 99) + (!srcfileloc "emit-rtl.h" 137) + nil ) + (!pair "x_frame_offset" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 142) + nil ) + (!pair "x_parm_birth_insn" + (!type already_seen 296) + (!srcfileloc "emit-rtl.h" 145) + nil ) + (!pair "x_used_temp_slots" + (!type already_seen 1422) + (!srcfileloc "emit-rtl.h" 148) + nil ) + (!pair "x_avail_temp_slots" + (!type already_seen 1413) + (!srcfileloc "emit-rtl.h" 151) + nil ) + (!pair "x_temp_slot_level" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 154) + nil ) + (!pair "stack_alignment_needed" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 158) + nil ) + (!pair "preferred_stack_boundary" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 162) + nil ) + (!pair "parm_stack_boundary" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 165) + nil ) + (!pair "max_used_stack_slot_alignment" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 168) + nil ) + (!pair "stack_alignment_estimated" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 177) + nil ) + (!pair "patch_area_size" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 180) + nil ) + (!pair "patch_area_entry" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 183) + nil ) + (!pair "accesses_prior_frames" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 189) + nil ) + (!pair "calls_eh_return" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 192) + nil ) + (!pair "saves_all_registers" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 196) + nil ) + (!pair "has_nonlocal_goto" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 200) + nil ) + (!pair "has_asm_statement" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 203) + nil ) + (!pair "all_throwers_are_sibcalls" + (!type 
already_seen 2) + (!srcfileloc "emit-rtl.h" 210) + nil ) + (!pair "limit_stack" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 214) + nil ) + (!pair "profile" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 217) + nil ) + (!pair "uses_const_pool" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 220) + nil ) + (!pair "uses_pic_offset_table" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 223) + nil ) + (!pair "uses_eh_lsda" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 226) + nil ) + (!pair "tail_call_emit" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 229) + nil ) + (!pair "arg_pointer_save_area_init" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 232) + nil ) + (!pair "frame_pointer_needed" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 237) + nil ) + (!pair "maybe_hot_insn_p" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 240) + nil ) + (!pair "stack_realign_needed" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 247) + nil ) + (!pair "stack_realign_tried" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 252) + nil ) + (!pair "need_drap" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 256) + nil ) + (!pair "stack_realign_processed" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 261) + nil ) + (!pair "stack_realign_finalized" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 265) + nil ) + (!pair "dbr_scheduled_p" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 268) + nil ) + (!pair "nothrow" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 273) + nil ) + (!pair "shrink_wrapped" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 276) + nil ) + (!pair "shrink_wrapped_separate" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 280) + nil ) + (!pair "sp_is_unchanging" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 285) + nil ) + (!pair "sp_is_clobbered_by_asm" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 288) + nil ) + (!pair "is_leaf" + (!type already_seen 2) + 
(!srcfileloc "emit-rtl.h" 294) + nil ) + (!pair "uses_only_leaf_regs" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 299) + nil ) + (!pair "has_bb_partition" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 304) + nil ) + (!pair "bb_reorder_complete" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 308) + nil ) + (!pair "asm_clobbers" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 314) + nil ) + (!pair "must_be_zero_on_return" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 317) + nil ) + (!pair "max_insn_address" + (!type already_seen 2) + (!srcfileloc "emit-rtl.h" 320) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1424 nil gc_used "const_int_hasher" + (!srcfileloc "emit-rtl.cc" 143) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1425 + (!type pointer 1426 nil gc_used + (!type already_seen 1425) + ) + gc_pointed_to "hash_table" + (!srcfileloc "emit-rtl.cc" 143) + (!fields 1 + (!pair "const_int_hasher" + (!type already_seen 1424) + (!srcfileloc "emit-rtl.cc" 143) + nil ) + ) + ) + + (!type struct 1427 nil gc_used "const_wide_int_hasher" + (!srcfileloc "emit-rtl.cc" 151) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1428 + (!type pointer 1429 nil gc_used + (!type already_seen 1428) + ) + gc_pointed_to "hash_table" + (!srcfileloc "emit-rtl.cc" 151) + (!fields 1 + (!pair "const_wide_int_hasher" + (!type already_seen 1427) + (!srcfileloc "emit-rtl.cc" 151) + nil ) + ) + ) + + (!type struct 1430 nil gc_used "const_poly_int_hasher" + (!srcfileloc "emit-rtl.cc" 161) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1431 + (!type pointer 1432 nil gc_used + (!type already_seen 1431) + ) + gc_pointed_to "hash_table" + (!srcfileloc "emit-rtl.cc" 161) + (!fields 1 + (!pair "const_poly_int_hasher" + (!type already_seen 1430) + (!srcfileloc "emit-rtl.cc" 161) + nil ) + ) + ) + + (!type struct 1433 nil gc_used "reg_attr_hasher" + (!srcfileloc "emit-rtl.cc" 170) + (!fields 0 ) + nil 1023 nil nil ) + + (!type 
user_struct 1434 + (!type pointer 1435 nil gc_used + (!type already_seen 1434) + ) + gc_pointed_to "hash_table" + (!srcfileloc "emit-rtl.cc" 170) + (!fields 1 + (!pair "reg_attr_hasher" + (!type already_seen 1433) + (!srcfileloc "emit-rtl.cc" 170) + nil ) + ) + ) + + (!type struct 1436 nil gc_used "const_double_hasher" + (!srcfileloc "emit-rtl.cc" 179) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1437 + (!type pointer 1438 nil gc_used + (!type already_seen 1437) + ) + gc_pointed_to "hash_table" + (!srcfileloc "emit-rtl.cc" 179) + (!fields 1 + (!pair "const_double_hasher" + (!type already_seen 1436) + (!srcfileloc "emit-rtl.cc" 179) + nil ) + ) + ) + + (!type struct 1439 nil gc_used "const_fixed_hasher" + (!srcfileloc "emit-rtl.cc" 188) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1440 + (!type pointer 1441 nil gc_used + (!type already_seen 1440) + ) + gc_pointed_to "hash_table" + (!srcfileloc "emit-rtl.cc" 188) + (!fields 1 + (!pair "const_fixed_hasher" + (!type already_seen 1439) + (!srcfileloc "emit-rtl.cc" 188) + nil ) + ) + ) + + (!type already_seen 356) + + (!type already_seen 364) + + (!type already_seen 360) + + (!type already_seen 358) + + (!type already_seen 361) + + (!type already_seen 362) + + (!type already_seen 357) + + (!type already_seen 368) + + (!type already_seen 370) + + (!type already_seen 372) + + (!type already_seen 374) + + (!type struct 1442 nil gc_unused "throw_stmt_node" + (!srcfileloc "except.h" 289) + (!fields 2 + (!pair "stmt" + (!type already_seen 281) + (!srcfileloc "except.h" 287) + nil ) + (!pair "lp_nr" + (!type already_seen 2) + (!srcfileloc "except.h" 288) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1443 nil gc_unused "pieces_addr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1444 nil gc_unused "op_by_pieces_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1445 nil gc_unused "move_by_pieces_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1446 nil gc_unused 
"store_by_pieces_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1447 nil gc_unused "compare_by_pieces_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1448 + (!type pointer 1449 nil gc_unused + (!type already_seen 1448) + ) + gc_unused "separate_ops"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1450 nil gc_unused "by_pieces_prev"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1451 nil gc_used "insn_cache_hasher" + (!srcfileloc "function.cc" 131) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1452 + (!type pointer 1453 nil gc_used + (!type already_seen 1452) + ) + gc_pointed_to "hash_table" + (!srcfileloc "function.cc" 131) + (!fields 1 + (!pair "insn_cache_hasher" + (!type already_seen 1451) + (!srcfileloc "function.cc" 131) + nil ) + ) + ) + + (!type struct 1454 nil gc_pointed_to "temp_slot_address_entry" + (!srcfileloc "function.cc" 598) + (!fields 3 + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "function.cc" 595) + nil ) + (!pair "address" + (!type already_seen 99) + (!srcfileloc "function.cc" 596) + nil ) + (!pair "temp_slot" + (!type already_seen 1413) + (!srcfileloc "function.cc" 597) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1455 nil gc_used "temp_address_hasher" + (!srcfileloc "function.cc" 608) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1456 + (!type pointer 1457 nil gc_used + (!type already_seen 1456) + ) + gc_pointed_to "hash_table" + (!srcfileloc "function.cc" 608) + (!fields 1 + (!pair "temp_address_hasher" + (!type already_seen 1455) + (!srcfileloc "function.cc" 608) + nil ) + ) + ) + + (!type already_seen 1420) + + (!type struct 1458 nil gc_unused "pass_instantiate_virtual_regs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1459 nil gc_unused "assign_parm_data_all"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1460 nil gc_unused "assign_parm_data_one"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1461 nil 
gc_unused "pass_leaf_regs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1462 nil gc_unused "pass_thread_prologue_and_epilogue"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1463 nil gc_unused "pass_zero_call_used_regs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1464 nil gc_unused "pass_match_asm_constraints"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1465 + (!type pointer 1466 nil gc_used + (!type already_seen 1465) + ) + gc_pointed_to "hash_map" + (!srcfileloc "except.cc" 151) + (!fields 2 + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "except.cc" 151) + nil ) + (!pair "tree_hash" + (!type already_seen 933) + (!srcfileloc "except.cc" 151) + nil ) + ) + ) + + (!type struct 1467 nil gc_unused "action_record"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1468 nil gc_unused "action_record_hasher" + (!srcfileloc "except.cc" 210) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1469 nil gc_unused "hash_table" + (!srcfileloc "except.cc" 210) + (!fields 1 + (!pair "action_record_hasher" + (!type already_seen 1468) + (!srcfileloc "except.cc" 210) + nil ) + ) + ) + + (!type struct 1470 nil gc_unused "duplicate_eh_regions_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1471 nil gc_unused "ttypes_filter"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1472 nil gc_unused "ttypes_filter_hasher" + (!srcfileloc "except.cc" 729) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1473 nil gc_unused "hash_table" + (!srcfileloc "except.cc" 729) + (!fields 1 + (!pair "ttypes_filter_hasher" + (!type already_seen 1472) + (!srcfileloc "except.cc" 729) + nil ) + ) + ) + + (!type struct 1474 nil gc_unused "ehspec_hasher" + (!srcfileloc "except.cc" 764) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1475 nil gc_unused "hash_table" + (!srcfileloc "except.cc" 764) + (!fields 1 + (!pair "ehspec_hasher" + (!type already_seen 1474) + (!srcfileloc "except.cc" 764) + nil ) + ) + ) + + 
(!type struct 1476 nil gc_unused "pass_set_nothrow_function_flags"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1477 nil gc_unused "pass_convert_to_eh_region_ranges"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 912) + + (!type struct 1478 + (!type pointer 1479 nil gc_used + (!type already_seen 1478) + ) + gc_pointed_to "test_of_length" + (!srcfileloc "ggc-tests.cc" 68) + (!fields 2 + (!pair "num_elem" + (!type already_seen 2) + (!srcfileloc "ggc-tests.cc" 64) + nil ) + (!pair "elem" + (!type array 1480 nil gc_used "1" + (!type already_seen 1479) + ) + (!srcfileloc "ggc-tests.cc" 65) + (!options + (!option length string "%h.num_elem") + ) + ) + ) + nil 1023 nil nil ) + + (!type already_seen 914) + + (!type struct 1481 + (!type pointer 1482 nil gc_used + (!type already_seen 1481) + ) + gc_pointed_to "test_of_union" + (!srcfileloc "ggc-tests.cc" 134) + (!fields 2 + (!pair "m_kind" + (!type already_seen 2) + (!srcfileloc "ggc-tests.cc" 125) + nil ) + (!pair "m_u" + (!type already_seen 910) + (!srcfileloc "ggc-tests.cc" 129) + (!options + (!option desc string "calc_desc (%0.m_kind)") + ) + ) + ) + nil 1023 nil nil ) + + (!type struct 1483 nil gc_unused "test_struct_with_dtor" + (!srcfileloc "ggc-tests.cc" 176) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1484 + (!type pointer 1485 nil gc_used + (!type already_seen 1484) + ) + gc_pointed_to "example_base" + (!srcfileloc "ggc-tests.cc" 245) + (!fields 2 + (!pair "m_kind" + (!type already_seen 2) + (!srcfileloc "ggc-tests.cc" 243) + nil ) + (!pair "m_a" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 244) + nil ) + ) + (!options + (!option tag string "0") + (!option desc string "%h.m_kind") + ) + 1023 nil nil ) + + (!type struct 1486 + (!type pointer 1487 nil gc_used + (!type already_seen 1486) + ) + gc_pointed_to "some_subclass" + (!srcfileloc "ggc-tests.cc" 256) + (!fields 1 + (!pair "m_b" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 255) + nil ) + ) + (!options + 
(!option tag string "1") + ) + 1023 nil + (!type already_seen 1484) + ) + + (!type struct 1488 + (!type pointer 1489 nil gc_used + (!type already_seen 1488) + ) + gc_pointed_to "some_other_subclass" + (!srcfileloc "ggc-tests.cc" 267) + (!fields 1 + (!pair "m_c" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 266) + nil ) + ) + (!options + (!option tag string "2") + ) + 1023 nil + (!type already_seen 1484) + ) + + (!type struct 1490 + (!type pointer 1491 nil gc_used + (!type already_seen 1490) + ) + gc_pointed_to "test_node" + (!srcfileloc "ggc-tests.cc" 326) + (!fields 3 + (!pair "m_prev" + (!type already_seen 1491) + (!srcfileloc "ggc-tests.cc" 323) + nil ) + (!pair "m_next" + (!type already_seen 1491) + (!srcfileloc "ggc-tests.cc" 324) + nil ) + (!pair "m_idx" + (!type already_seen 2) + (!srcfileloc "ggc-tests.cc" 325) + nil ) + ) + (!options + (!option chain_prev string "%h.m_prev") + (!option chain_next string "%h.m_next") + ) + 1023 nil nil ) + + (!type user_struct 1492 + (!type pointer 1493 nil gc_used + (!type already_seen 1492) + ) + gc_pointed_to "user_struct" + (!srcfileloc "ggc-tests.cc" 388) + (!fields 0 ) + ) + + (!type already_seen 617) + + (!type struct 1494 nil gc_unused "gcse_expr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1495 + (!type pointer 1496 nil gc_unused + (!type already_seen 1495) + ) + gc_unused "gcse_occr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1497 nil gc_unused "gcse_hash_table_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1498 nil gc_unused "ls_expr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1499 nil gc_unused "pre_ldst_expr_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1500 nil gc_unused "bb_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1501 nil gc_unused "vec" + (!srcfileloc "gcse.cc" 620) + (!fields 1 + (!pair "rtx_insn" + (!type already_seen 296) + (!srcfileloc "gcse.cc" 620) + nil ) + ) + ) + + (!type user_struct 1502 nil gc_unused 
"vec" + (!srcfileloc "gcse.cc" 621) + (!fields 1 + (!pair "modify_pair" + (!type undefined 1503 nil gc_unused "modify_pair" + (!srcfileloc "gcse.cc" 621) + ) + (!srcfileloc "gcse.cc" 621) + nil ) + ) + ) + + (!type already_seen 1503) + + (!type struct 1504 nil gc_unused "reg_avail_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1505 nil gc_unused "mem_conflict_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1506 nil gc_unused "edge_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1507 nil gc_unused "set_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1508 nil gc_unused "pass_rtl_pre"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1509 nil gc_unused "pass_rtl_hoist"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1510 nil gc_unused "godump_str_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1511 nil gc_unused "macro_hash_value"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1512 nil gc_unused "godump_container"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1513 nil gc_used "libfunc_decl_hasher" + (!srcfileloc "optabs-libfuncs.cc" 720) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1514 + (!type pointer 1515 nil gc_used + (!type already_seen 1514) + ) + gc_pointed_to "hash_table" + (!srcfileloc "optabs-libfuncs.cc" 720) + (!fields 1 + (!pair "libfunc_decl_hasher" + (!type already_seen 1513) + (!srcfileloc "optabs-libfuncs.cc" 720) + nil ) + ) + ) + + (!type struct 1516 nil gc_unused "bb_profile_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1517 nil gc_unused "edge_profile_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1518 nil gc_unused "bb_stats"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1519 nil gc_unused "location_triplet"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1520 nil gc_unused "location_triplet_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1521 + (!type pointer 1522 nil gc_unused + (!type 
already_seen 1521) + ) + gc_unused "fixup_edge_type" + (!srcfileloc "mcf.cc" 94) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1523 + (!type pointer 1524 nil gc_unused + (!type already_seen 1523) + ) + gc_unused "fixup_vertex_type" + (!srcfileloc "mcf.cc" 103) + (!fields 0 ) + nil 1023 nil nil ) + + (!type struct 1525 nil gc_unused "fixup_graph_type"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1526 nil gc_unused "queue_type"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1527 nil gc_unused "augmenting_path_type"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type union 1528 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/mcf.cc:343"nil + (!fields 0 ) + nil 0 nil ) + + (!type struct 1529 + (!type pointer 1530 nil gc_unused + (!type already_seen 1529) + ) + gc_unused "stack_def"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1531 + (!type pointer 1532 nil gc_unused + (!type already_seen 1531) + ) + gc_unused "block_info_def"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1533 nil gc_unused "pass_stack_regs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1534 nil gc_unused "pass_stack_regs_run"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1535 nil gc_unused "pass_free_cfg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1536 nil gc_unused "pass_into_cfg_layout_mode"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1537 nil gc_unused "pass_outof_cfg_layout_mode"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1538 nil gc_unused "cfg_hooks"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1539 + (!type pointer 1540 nil gc_used + (!type already_seen 1539) + ) + gc_pointed_to "string_pool_data" + (!srcfileloc "stringpool.cc" 244) + (!fields 3 + (!pair "entries" + (!type already_seen 17) + (!srcfileloc "stringpool.cc" 241) + (!options + (!option nested_ptr nested + (!type already_seen 22) + "%h ? HT_IDENT_TO_GCC_IDENT (%h) : NULL" "%h ? 
GCC_IDENT_TO_HT_IDENT (%h) : NULL") + (!option length string "%h.nslots") + ) + ) + (!pair "nslots" + (!type already_seen 2) + (!srcfileloc "stringpool.cc" 242) + nil ) + (!pair "nelements" + (!type already_seen 2) + (!srcfileloc "stringpool.cc" 243) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1541 nil gc_pointed_to "type_hash" + (!srcfileloc "tree.cc" 174) + (!fields 2 + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "tree.cc" 172) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "tree.cc" 173) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1542 nil gc_used "type_cache_hasher" + (!srcfileloc "tree.cc" 198) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1543 + (!type pointer 1544 nil gc_used + (!type already_seen 1543) + ) + gc_pointed_to "hash_table" + (!srcfileloc "tree.cc" 198) + (!fields 1 + (!pair "type_cache_hasher" + (!type already_seen 1542) + (!srcfileloc "tree.cc" 198) + nil ) + ) + ) + + (!type struct 1545 nil gc_used "int_cst_hasher" + (!srcfileloc "tree.cc" 209) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1546 + (!type pointer 1547 nil gc_used + (!type already_seen 1546) + ) + gc_pointed_to "hash_table" + (!srcfileloc "tree.cc" 209) + (!fields 1 + (!pair "int_cst_hasher" + (!type already_seen 1545) + (!srcfileloc "tree.cc" 209) + nil ) + ) + ) + + (!type struct 1548 nil gc_used "poly_int_cst_hasher" + (!srcfileloc "tree.cc" 220) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1549 + (!type pointer 1550 nil gc_used + (!type already_seen 1549) + ) + gc_pointed_to "hash_table" + (!srcfileloc "tree.cc" 220) + (!fields 1 + (!pair "poly_int_cst_hasher" + (!type already_seen 1548) + (!srcfileloc "tree.cc" 220) + nil ) + ) + ) + + (!type struct 1551 nil gc_used "cl_option_hasher" + (!srcfileloc "tree.cc" 236) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1552 + (!type pointer 1553 nil gc_used + (!type already_seen 1552) + ) + 
gc_pointed_to "hash_table" + (!srcfileloc "tree.cc" 236) + (!fields 1 + (!pair "cl_option_hasher" + (!type already_seen 1551) + (!srcfileloc "tree.cc" 236) + nil ) + ) + ) + + (!type user_struct 1554 + (!type pointer 1555 nil gc_used + (!type already_seen 1554) + ) + gc_pointed_to "hash_table" + (!srcfileloc "tree.cc" 242) + (!fields 1 + (!pair "tree_decl_map_cache_hasher" + (!type already_seen 928) + (!srcfileloc "tree.cc" 242) + nil ) + ) + ) + + (!type user_struct 1556 + (!type pointer 1557 nil gc_used + (!type already_seen 1556) + ) + gc_pointed_to "hash_table" + (!srcfileloc "tree.cc" 248) + (!fields 1 + (!pair "tree_vec_map_cache_hasher" + (!type already_seen 929) + (!srcfileloc "tree.cc" 248) + nil ) + ) + ) + + (!type struct 1558 nil gc_unused "addr_const"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1559 nil gc_used "section_hasher" + (!srcfileloc "varasm.cc" 193) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1560 + (!type pointer 1561 nil gc_used + (!type already_seen 1560) + ) + gc_pointed_to "hash_table" + (!srcfileloc "varasm.cc" 193) + (!fields 1 + (!pair "section_hasher" + (!type already_seen 1559) + (!srcfileloc "varasm.cc" 193) + nil ) + ) + ) + + (!type struct 1562 nil gc_used "object_block_hasher" + (!srcfileloc "varasm.cc" 204) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1563 + (!type pointer 1564 nil gc_used + (!type already_seen 1563) + ) + gc_pointed_to "hash_table" + (!srcfileloc "varasm.cc" 204) + (!fields 1 + (!pair "object_block_hasher" + (!type already_seen 1562) + (!srcfileloc "varasm.cc" 204) + nil ) + ) + ) + + (!type struct 1565 nil gc_unused "asm_int_op"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1566 + (!type pointer 1567 nil gc_used + (!type already_seen 1566) + ) + gc_pointed_to "hash_table" + (!srcfileloc "varasm.cc" 3093) + (!fields 1 + (!pair "tree_descriptor_hasher" + (!type already_seen 1091) + (!srcfileloc "varasm.cc" 3093) + nil ) + ) + ) + + (!type already_seen 1007) + 
+ (!type already_seen 1006) + + (!type struct 1568 nil gc_unused "constant_descriptor_rtx_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1569 nil gc_unused "const_rtx_data_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1570 nil gc_unused "oc_outer_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1571 nil gc_unused "oc_local_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1572 nil gc_used "tm_clone_hasher" + (!srcfileloc "varasm.cc" 6374) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1573 + (!type pointer 1574 nil gc_used + (!type already_seen 1573) + ) + gc_pointed_to "hash_table" + (!srcfileloc "varasm.cc" 6374) + (!fields 1 + (!pair "tm_clone_hasher" + (!type already_seen 1572) + (!srcfileloc "varasm.cc" 6374) + nil ) + ) + ) + + (!type struct 1575 nil gc_unused "tm_alias_pair"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 403) + + (!type already_seen 763) + + (!type already_seen 761) + + (!type already_seen 771) + + (!type already_seen 769) + + (!type already_seen 385) + + (!type already_seen 776) + + (!type already_seen 797) + + (!type already_seen 786) + + (!type struct 1576 nil gc_pointed_to "gimple_statement_wce" + (!srcfileloc "gimple.h" 560) + (!fields 1 + (!pair "cleanup" + (!type already_seen 281) + (!srcfileloc "gimple.h" 559) + nil ) + ) + (!options + (!option tag string "GSS_WCE") + ) + 1023 nil + (!type already_seen 282) + ) + + (!type already_seen 802) + + (!type already_seen 805) + + (!type already_seen 804) + + (!type already_seen 799) + + (!type struct 1577 nil gc_pointed_to "gomp_scan" + (!srcfileloc "gimple.h" 783) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_SINGLE_LAYOUT") + ) + 1023 nil + (!type already_seen 799) + ) + + (!type already_seen 794) + + (!type struct 1578 nil gc_pointed_to "gimple_statement_omp_return" + (!srcfileloc "gimple.h" 825) + (!fields 0 ) + (!options + (!option tag string "GSS_OMP_ATOMIC_STORE_LAYOUT") + ) + 1023 nil + (!type 
already_seen 794) + ) + + (!type struct 1579 nil gc_unused "gimple_temp_hash_elt"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1580 nil gc_pointed_to "tm_restart_node" + (!srcfileloc "gimple-ssa.h" 32) + (!fields 2 + (!pair "stmt" + (!type already_seen 281) + (!srcfileloc "gimple-ssa.h" 30) + nil ) + (!pair "label_or_list" + (!type already_seen 23) + (!srcfileloc "gimple-ssa.h" 31) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type already_seen 406) + + (!type already_seen 397) + + (!type already_seen 394) + + (!type already_seen 396) + + (!type already_seen 398) + + (!type already_seen 405) + + (!type struct 1581 nil gc_unused "pass_release_ssa_names"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type union 1582 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/tree-eh.cc:53"nil + (!fields 0 ) + nil 0 nil ) + + (!type struct 1583 nil gc_unused "finally_tree_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1584 nil gc_unused "finally_tree_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1585 nil gc_unused "goto_queue_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1586 nil gc_unused "leh_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1587 nil gc_unused "leh_tf_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1588 nil gc_unused "labels_s"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1589 nil gc_unused "pass_lower_eh"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1590 nil gc_unused "pass_refactor_eh"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1591 nil gc_unused "pass_lower_resx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1592 nil gc_unused "pass_lower_eh_dispatch"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1593 nil gc_unused "pass_cleanup_eh"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1594 nil gc_used "mem_addr_template" + (!srcfileloc "tree-ssa-address.cc" 95) + (!fields 3 + 
(!pair "ref" + (!type already_seen 99) + (!srcfileloc "tree-ssa-address.cc" 83) + nil ) + (!pair "step_p" + (!type already_seen 100) + (!srcfileloc "tree-ssa-address.cc" 84) + (!options + (!option skip string "") + ) + ) + (!pair "off_p" + (!type already_seen 100) + (!srcfileloc "tree-ssa-address.cc" 86) + (!options + (!option skip string "") + ) + ) + ) + nil 1023 nil nil ) + + (!type user_struct 1595 + (!type pointer 1596 nil gc_used + (!type already_seen 1595) + ) + gc_pointed_to "vec" + (!srcfileloc "tree-ssa-address.cc" 95) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "tree-ssa-address.cc" 95) + nil ) + (!pair "mem_addr_template" + (!type already_seen 1594) + (!srcfileloc "tree-ssa-address.cc" 95) + nil ) + ) + ) + + (!type struct 1597 nil gc_unused "mem_address"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1598 nil gc_unused "cfg_stats_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1599 nil gc_unused "replace_decls_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1600 nil gc_unused "locus_discrim_map"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1601 nil gc_unused "locus_discrim_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1602 nil gc_unused "pass_build_cfg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1603 nil gc_unused "omp_region"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1604 nil gc_unused "label_record"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1605 nil gc_unused "walk_stmt_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1606 nil gc_unused "move_stmt_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1607 nil gc_unused "profile_record"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1608 nil gc_unused "pass_split_crit_edges"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1609 nil gc_unused "pass_warn_function_return"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1610 nil gc_unused 
"pass_warn_unused_result"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1611 nil gc_unused "pass_fixup_cfg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1612 nil gc_unused "iv_use"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1613 nil gc_unused "iv"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1614 nil gc_unused "version_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1615 nil gc_unused "comp_cost"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1616 nil gc_unused "iv_inv_expr_ent"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1617 nil gc_unused "cost_pair"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1618 nil gc_unused "iv_group"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1619 nil gc_unused "iv_cand"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1620 nil gc_unused "iv_common_cand"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1621 nil gc_unused "iv_common_cand_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1622 nil gc_unused "iv_inv_expr_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1623 nil gc_unused "ivopts_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1624 nil gc_unused "iv_ca"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1625 nil gc_unused "iv_ca_delta"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1626 nil gc_unused "tree_niter_desc"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1627 nil gc_unused "ifs_ivopts_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1628 nil gc_unused "walk_tree_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1629 nil gc_unused "aff_tree"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1630 nil gc_unused "ainc_cost_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1631 nil gc_unused "dfa_stats_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1632 nil gc_unused "numbered_tree" + (!srcfileloc "tree-dfa.cc" 967) + (!fields 2 + 
(!pair "t" + (!type already_seen 23) + (!srcfileloc "tree-dfa.cc" 965) + nil ) + (!pair "num" + (!type already_seen 2) + (!srcfileloc "tree-dfa.cc" 966) + nil ) + ) + nil 1023 nil nil ) + + (!type struct 1633 nil gc_pointed_to "scev_info_str" + (!srcfileloc "tree-scalar-evolution.cc" 300) + (!fields 3 + (!pair "name_version" + (!type already_seen 2) + (!srcfileloc "tree-scalar-evolution.cc" 297) + nil ) + (!pair "instantiated_below" + (!type already_seen 2) + (!srcfileloc "tree-scalar-evolution.cc" 298) + nil ) + (!pair "chrec" + (!type already_seen 23) + (!srcfileloc "tree-scalar-evolution.cc" 299) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1634 nil gc_used "scev_info_hasher" + (!srcfileloc "tree-scalar-evolution.cc" 312) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1635 + (!type pointer 1636 nil gc_used + (!type already_seen 1635) + ) + gc_pointed_to "hash_table" + (!srcfileloc "tree-scalar-evolution.cc" 312) + (!fields 1 + (!pair "scev_info_hasher" + (!type already_seen 1634) + (!srcfileloc "tree-scalar-evolution.cc" 312) + nil ) + ) + ) + + (!type struct 1637 nil gc_unused "instantiate_cache_type"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1638 nil gc_unused "chrec_stats"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 400) + + (!type struct 1639 nil gc_unused "pass_ipa_tree_profile"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1640 nil gc_unused "nesting_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1641 nil gc_unused "nesting_copy_body_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1642 nil gc_unused "omp_for_data_loop"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1643 nil gc_unused "known_properties"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1644 nil gc_unused "declare_variant_simd_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1645 nil gc_used "omp_declare_variant_entry" + (!srcfileloc "omp-general.cc" 
2052) + (!fields 5 + (!pair "variant" + (!type already_seen 812) + (!srcfileloc "omp-general.cc" 2033) + nil ) + (!pair "score" + (!type already_seen 2) + (!srcfileloc "omp-general.cc" 2035) + nil ) + (!pair "score_in_declare_simd_clone" + (!type already_seen 2) + (!srcfileloc "omp-general.cc" 2037) + nil ) + (!pair "ctx" + (!type already_seen 23) + (!srcfileloc "omp-general.cc" 2039) + nil ) + (!pair "matches" + (!type already_seen 2) + (!srcfileloc "omp-general.cc" 2041) + nil ) + ) + nil 1023 nil nil ) + + (!type user_struct 1646 + (!type pointer 1647 nil gc_used + (!type already_seen 1646) + ) + gc_pointed_to "vec" + (!srcfileloc "omp-general.cc" 2052) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "omp-general.cc" 2052) + nil ) + (!pair "omp_declare_variant_entry" + (!type already_seen 1645) + (!srcfileloc "omp-general.cc" 2052) + nil ) + ) + ) + + (!type struct 1648 nil gc_pointed_to "omp_declare_variant_base_entry" + (!srcfileloc "omp-general.cc" 2053) + (!fields 3 + (!pair "base" + (!type already_seen 812) + (!srcfileloc "omp-general.cc" 2047) + nil ) + (!pair "node" + (!type already_seen 812) + (!srcfileloc "omp-general.cc" 2050) + nil ) + (!pair "variants" + (!type already_seen 1647) + (!srcfileloc "omp-general.cc" 2052) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1649 nil gc_used "omp_declare_variant_hasher" + (!srcfileloc "omp-general.cc" 2101) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1650 + (!type pointer 1651 nil gc_used + (!type already_seen 1650) + ) + gc_pointed_to "hash_table" + (!srcfileloc "omp-general.cc" 2101) + (!fields 1 + (!pair "omp_declare_variant_hasher" + (!type already_seen 1649) + (!srcfileloc "omp-general.cc" 2101) + nil ) + ) + ) + + (!type struct 1652 nil gc_used "omp_declare_variant_alt_hasher" + (!srcfileloc "omp-general.cc" 2123) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1653 + (!type pointer 1654 nil gc_used + (!type 
already_seen 1653) + ) + gc_pointed_to "hash_table" + (!srcfileloc "omp-general.cc" 2123) + (!fields 1 + (!pair "omp_declare_variant_alt_hasher" + (!type already_seen 1652) + (!srcfileloc "omp-general.cc" 2123) + nil ) + ) + ) + + (!type struct 1655 nil gc_unused "omp_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1656 nil gc_unused "omp_for_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1657 nil gc_unused "omplow_simd_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1658 nil gc_unused "omp_taskcopy_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1659 nil gc_unused "lower_omp_regimplify_operands_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1660 nil gc_unused "pass_lower_omp"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1661 nil gc_unused "pass_diagnose_omp_blocks"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1662 nil gc_unused "cl_option_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1663 nil gc_unused "scratch_reg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1664 nil gc_unused "ix86_address"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1665 nil gc_used "dllimport_hasher" + (!srcfileloc "config/i386/i386.cc" 11835) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1666 + (!type pointer 1667 nil gc_used + (!type already_seen 1666) + ) + gc_pointed_to "hash_table" + (!srcfileloc "config/i386/i386.cc" 11835) + (!fields 1 + (!pair "dllimport_hasher" + (!type already_seen 1665) + (!srcfileloc "config/i386/i386.cc" 11835) + nil ) + ) + ) + + (!type struct 1668 nil gc_unused "recog_data_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1669 nil gc_unused "ix86_vector_costs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1670 nil gc_unused "gcc_target"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1671 nil gc_unused "dump_file_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1672 nil gc_unused 
"pass_build_ssa_passes"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1673 nil gc_unused "pass_local_optimization_passes"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1674 nil gc_unused "pass_ipa_remove_symbols"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1675 nil gc_unused "pass_all_early_optimizations"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1676 nil gc_unused "pass_all_optimizations"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1677 nil gc_unused "pass_all_optimizations_g"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1678 nil gc_unused "pass_rest_of_compilation"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1679 nil gc_unused "pass_postreload"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1680 nil gc_unused "pass_late_compilation"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1681 nil gc_unused "pass_pre_slp_scalar_cleanup"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1682 + (!type pointer 1683 nil gc_unused + (!type already_seen 1682) + ) + gc_unused "uid_range"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1684 nil gc_unused "pass_list_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1685 + (!type pointer 1686 nil gc_unused + (!type already_seen 1685) + ) + gc_unused "lto_out_decl_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1687 + (!type pointer 1688 nil gc_used + (!type already_seen 1687) + ) + gc_pointed_to "hash_map" + (!srcfileloc "cgraphclones.cc" 466) + (!fields 1 + (!pair "unsigned" + (!type already_seen 1404) + (!srcfileloc "cgraphclones.cc" 466) + nil ) + ) + ) + + (!type user_struct 1689 + (!type pointer 1690 nil gc_used + (!type already_seen 1689) + ) + gc_pointed_to "vec" + (!srcfileloc "tree-phinodes.cc" 70) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "tree-phinodes.cc" 70) + nil ) + (!pair "gimple" + (!type already_seen 281) + (!srcfileloc "tree-phinodes.cc" 70) + nil ) + ) + ) + + (!type 
struct 1691 nil gc_unused "ao_ref"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type undefined 1692 nil gc_unused "TRAILING_WIDE_INT_ACCESSOR" + (!srcfileloc "tree-ssanames.h" 52) + ) + + (!type struct 1693 nil gc_unused "assert_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1694 nil gc_unused "irange" + (!srcfileloc "value-range.h" 142) + (!fields 0 ) + ) + + (!type user_struct 1695 nil gc_unused "int_range" + (!srcfileloc "value-range.h" 172) + (!fields 0 ) + ) + + (!type already_seen 1135) + + (!type already_seen 1136) + + (!type user_struct 1696 nil gc_unused "int_range<255>" + (!srcfileloc "value-range.h" 186) + (!fields 1 + (!pair "255" + (!type undefined 1697 nil gc_unused "255" + (!srcfileloc "value-range.h" 186) + ) + (!srcfileloc "value-range.h" 186) + nil ) + ) + ) + + (!type already_seen 1697) + + (!type struct 1698 nil gc_unused "irange_allocator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1138) + + (!type already_seen 1130) + + (!type already_seen 1139) + + (!type already_seen 1129) + + (!type already_seen 1127) + + (!type already_seen 1133) + + (!type struct 1699 nil gc_used "ipa_vr" + (!srcfileloc "ipa-prop.h" 910) + (!fields 4 + (!pair "known" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 304) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "ipa-prop.h" 305) + nil ) + (!pair "min" + (!type already_seen 497) + (!srcfileloc "ipa-prop.h" 306) + nil ) + (!pair "max" + (!type already_seen 497) + (!srcfileloc "ipa-prop.h" 307) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 1137) + + (!type struct 1700 nil gc_unused "ipa_auto_call_arg_values"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1701 nil gc_unused "ipa_call_arg_values"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1146) + + (!type already_seen 1145) + + (!type already_seen 1149) + + (!type already_seen 1150) + + (!type user_struct 1702 + (!type pointer 1703 nil gc_used + (!type already_seen 1702) + ) + 
gc_pointed_to "vec" + (!srcfileloc "ipa-prop.h" 908) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-prop.h" 908) + nil ) + (!pair "ipa_bits" + (!type already_seen 1132) + (!srcfileloc "ipa-prop.h" 908) + nil ) + ) + ) + + (!type user_struct 1704 + (!type pointer 1705 nil gc_used + (!type already_seen 1704) + ) + gc_pointed_to "vec" + (!srcfileloc "ipa-prop.h" 910) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-prop.h" 910) + nil ) + (!pair "ipa_vr" + (!type already_seen 1699) + (!srcfileloc "ipa-prop.h" 910) + nil ) + ) + ) + + (!type struct 1706 + (!type pointer 1707 nil gc_used + (!type already_seen 1706) + ) + gc_pointed_to "ipcp_transformation" + (!srcfileloc "ipa-prop.h" 930) + (!fields 3 + (!pair "agg_values" + (!type already_seen 1161) + (!srcfileloc "ipa-prop.h" 906) + nil ) + (!pair "bits" + (!type already_seen 1703) + (!srcfileloc "ipa-prop.h" 908) + nil ) + (!pair "m_vr" + (!type already_seen 1705) + (!srcfileloc "ipa-prop.h" 910) + nil ) + ) + nil 1023 nil nil ) + + (!type already_seen 1157) + + (!type already_seen 1159) + + (!type user_struct 1708 + (!type pointer 1709 nil gc_used + (!type already_seen 1708) + ) + gc_pointed_to "ipa_node_params_t" + (!srcfileloc "ipa-prop.h" 1011) + (!fields 0 ) + ) + + (!type user_struct 1710 + (!type pointer 1711 nil gc_used + (!type already_seen 1710) + ) + gc_pointed_to "ipa_edge_args_sum_t" + (!srcfileloc "ipa-prop.h" 1033) + (!fields 0 ) + ) + + (!type struct 1712 nil gc_unused "ipcp_transformation_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1713 + (!type pointer 1714 nil gc_used + (!type already_seen 1713) + ) + gc_pointed_to "function_summary" + (!srcfileloc "ipa-prop.h" 1065) + (!fields 1 + (!pair "ipcp_transformation" + (!type already_seen 1707) + (!srcfileloc "ipa-prop.h" 1065) + nil ) + ) + ) + + (!type struct 1715 nil gc_used "tm_wrapper_hasher" + (!srcfileloc "trans-mem.cc" 468) + (!fields 0 ) + nil 1023 nil nil ) + + (!type 
user_struct 1716 + (!type pointer 1717 nil gc_used + (!type already_seen 1716) + ) + gc_pointed_to "hash_table" + (!srcfileloc "trans-mem.cc" 468) + (!fields 1 + (!pair "tm_wrapper_hasher" + (!type already_seen 1715) + (!srcfileloc "trans-mem.cc" 468) + nil ) + ) + ) + + (!type struct 1718 nil gc_unused "diagnose_tm"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1719 nil gc_unused "pass_diagnose_tm_blocks"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1720 nil gc_unused "tm_log_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1721 nil gc_unused "log_entry_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1722 nil gc_unused "tm_new_mem_map"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1723 nil gc_unused "tm_mem_map_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1724 nil gc_unused "pass_lower_tm"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1725 nil gc_unused "tm_region"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1726 nil gc_unused "pass_tm_init"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1727 nil gc_unused "bb2reg_stuff"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1728 nil gc_unused "pass_tm_mark"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1729 nil gc_unused "pass_tm_edges"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1730 nil gc_unused "tm_memop"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1731 nil gc_unused "tm_memop_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1732 nil gc_unused "tm_memopt_bitmaps"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1733 nil gc_unused "pass_tm_memopt"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1734 nil gc_unused "tm_ipa_cg_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1735 nil gc_unused "vec" + (!srcfileloc "trans-mem.cc" 4184) + (!fields 1 + (!pair "cgraph_node" + (!type already_seen 812) + (!srcfileloc "trans-mem.cc" 4184) + nil ) + ) + ) + + 
(!type struct 1736 nil gc_unused "demangle_component"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1737 nil gc_unused "create_version_alias_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1738 nil gc_unused "pass_ipa_tm"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1739 nil gc_unused "lto_location_cache"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1740 nil gc_unused "lto_input_block"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 340) + + (!type struct 1741 nil gc_unused "lto_simple_header"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1742 nil gc_unused "lto_simple_header_with_strings"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1743 nil gc_unused "lto_function_header"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1744 nil gc_unused "lto_decl_header"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1745 nil gc_unused "lto_stats_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1746 nil gc_unused "lto_encoder_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 328) + + (!type struct 1747 nil gc_unused "lto_symtab_encoder_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1748 nil gc_unused "lto_tree_ref_encoder"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 331) + + (!type already_seen 335) + + (!type already_seen 330) + + (!type already_seen 334) + + (!type already_seen 336) + + (!type already_seen 338) + + (!type already_seen 339) + + (!type struct 1749 nil gc_unused "lto_char_ptr_base"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1750 nil gc_unused "lto_output_stream"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1751 nil gc_unused "lto_simple_output_block"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1752 nil gc_unused "string_slot"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1753 nil gc_unused "string_slot_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1754 nil gc_unused 
"dref_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 582) + + (!type already_seen 584) + + (!type already_seen 593) + + (!type already_seen 595) + + (!type already_seen 601) + + (!type already_seen 611) + + (!type already_seen 613) + + (!type already_seen 615) + + (!type already_seen 619) + + (!type already_seen 621) + + (!type already_seen 1184) + + (!type already_seen 1183) + + (!type already_seen 1181) + + (!type struct 1755 nil gc_unused "inline_param_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1180) + + (!type already_seen 1193) + + (!type struct 1756 nil gc_unused "agg_position_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1187) + + (!type struct 1757 nil gc_unused "ipa_size_summary"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1191) + + (!type already_seen 1186) + + (!type already_seen 1188) + + (!type already_seen 1190) + + (!type already_seen 1194) + + (!type user_struct 1758 nil gc_unused "ipa_fn_summary_t" + (!srcfileloc "ipa-fnsummary.h" 247) + (!fields 0 ) + ) + + (!type user_struct 1759 + (!type pointer 1760 nil gc_used + (!type already_seen 1759) + ) + gc_pointed_to "fast_function_summary" + (!srcfileloc "ipa-fnsummary.h" 249) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-fnsummary.h" 249) + nil ) + (!pair "ipa_fn_summary" + (!type already_seen 1178) + (!srcfileloc "ipa-fnsummary.h" 249) + nil ) + ) + ) + + (!type struct 1761 nil gc_unused "ipa_size_summary_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1762 nil gc_unused "ipa_call_summary_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1763 nil gc_unused "ipa_call_estimates"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1764 nil gc_unused "ipa_cached_call_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1765 nil gc_unused "ipa_call_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1766 nil gc_unused "vtable_registration"nil + (!fields 0 ) 
+ nil 0 nil nil ) + + (!type struct 1767 nil gc_unused "vtbl_map_hasher" + (!srcfileloc "vtable-verify.cc" 298) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1768 nil gc_unused "hash_table" + (!srcfileloc "vtable-verify.cc" 298) + (!fields 1 + (!pair "vtbl_map_hasher" + (!type already_seen 1767) + (!srcfileloc "vtable-verify.cc" 298) + nil ) + ) + ) + + (!type undefined 1769 nil gc_unused "vtbl_map_table_type::iterator" + (!srcfileloc "vtable-verify.cc" 299) + ) + + (!type struct 1770 nil gc_unused "vtbl_map_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1771 nil gc_unused "pass_vtable_verify"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1772 nil gc_unused "hwasan_stack_var"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1773 nil gc_unused "asan_mem_ref"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1774 nil gc_unused "asan_mem_ref_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1775 nil gc_unused "asan_redzone_buffer"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1776 nil gc_unused "asan_add_string_csts_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1777 nil gc_unused "pass_asan"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1778 nil gc_unused "pass_asan_O0"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1779 nil gc_pointed_to "tree_type_map" + (!srcfileloc "ubsan.cc" 58) + (!fields 2 + (!pair "type" + (!type already_seen 974) + (!srcfileloc "ubsan.cc" 56) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "ubsan.cc" 57) + nil ) + ) + (!options + (!option for_user string "") + ) + 1023 nil nil ) + + (!type struct 1780 nil gc_used "tree_type_map_cache_hasher" + (!srcfileloc "ubsan.cc" 82) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1781 + (!type pointer 1782 nil gc_used + (!type already_seen 1781) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ubsan.cc" 82) + (!fields 1 + (!pair "tree_type_map_cache_hasher" + (!type already_seen 1780) + 
(!srcfileloc "ubsan.cc" 82) + nil ) + ) + ) + + (!type struct 1783 nil gc_unused "pass_ubsan"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1784 nil gc_unused "pass_tsan"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1785 nil gc_unused "pass_tsan_O0"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1786 nil gc_unused "sanopt_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1787 nil gc_unused "sanopt_tree_triplet"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1788 nil gc_unused "sanopt_tree_triplet_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1789 nil gc_unused "sanopt_tree_couple"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1790 nil gc_unused "sanopt_tree_couple_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1791 nil gc_unused "sanopt_ctx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1792 nil gc_unused "pass_sanopt"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1793 nil gc_unused "type_pair"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1794 nil gc_unused "default_hash_traits"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1204) + + (!type struct 1795 nil gc_unused "odr_name_hasher" + (!srcfileloc "ipa-devirt.cc" 491) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1796 nil gc_unused "hash_table" + (!srcfileloc "ipa-devirt.cc" 491) + (!fields 1 + (!pair "odr_name_hasher" + (!type already_seen 1795) + (!srcfileloc "ipa-devirt.cc" 491) + nil ) + ) + ) + + (!type user_struct 1797 + (!type pointer 1798 nil gc_used + (!type already_seen 1797) + ) + gc_pointed_to "vec" + (!srcfileloc "ipa-devirt.cc" 498) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ipa-devirt.cc" 498) + nil ) + (!pair "odr_type" + (!type already_seen 1203) + (!srcfileloc "ipa-devirt.cc" 498) + nil ) + ) + ) + + (!type struct 1799 nil gc_unused "odr_enum_val"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1800 nil gc_unused "odr_enum"nil + 
(!fields 0 ) + nil 0 nil nil ) + + (!type struct 1801 nil gc_unused "polymorphic_call_target_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1802 nil gc_unused "polymorphic_call_target_hasher" + (!srcfileloc "ipa-devirt.cc" 2732) + (!fields 0 ) + nil 1023 nil nil ) + + (!type user_struct 1803 nil gc_unused "hash_table" + (!srcfileloc "ipa-devirt.cc" 2732) + (!fields 1 + (!pair "polymorphic_call_target_hasher" + (!type already_seen 1802) + (!srcfileloc "ipa-devirt.cc" 2732) + nil ) + ) + ) + + (!type struct 1804 nil gc_unused "odr_type_warn_count"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1805 nil gc_unused "decl_warn_count"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1806 nil gc_unused "final_warning_record"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1807 nil gc_unused "pass_ipa_devirt"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1808 nil gc_unused "pass_ipa_odr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1809 nil gc_unused "direct_internal_fn_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1810 nil gc_unused "arg_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1811 nil gc_unused "arg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1812 nil gc_unused "builtin_isa"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1813 nil gc_unused "expand_vec_perm_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1814 nil gc_unused "ix86_target_opts"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1815 nil gc_unused "stringop_size_range"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1816 nil gc_unused "File_Info_Type"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1817 nil gc_unused "Elist_Header"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1818 nil gc_unused "Elmt_Item"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1819 nil gc_unused "String_Entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1820 nil gc_unused "List_Header"nil + 
(!fields 0 ) + nil 0 nil nil ) + + (!type struct 1821 nil gc_unused "attrib"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1822 nil gc_unused "incomplete"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1823 nil gc_unused "subst_pair_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1824 nil gc_unused "variant_desc_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1825 nil gc_used "value_annotation_hasher" + (!srcfileloc "ada/gcc-interface/decl.cc" 157) + (!fields 0 ) + nil 1 nil nil ) + + (!type user_struct 1826 + (!type pointer 1827 nil gc_used + (!type already_seen 1826) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ada/gcc-interface/decl.cc" 157) + (!fields 1 + (!pair "value_annotation_hasher" + (!type already_seen 1825) + (!srcfileloc "ada/gcc-interface/decl.cc" 157) + nil ) + ) + ) + + (!type user_struct 1828 + (!type pointer 1829 nil gc_used + (!type already_seen 1828) + ) + gc_pointed_to "vec" + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + (!fields 2 + (!pair "va_gc_atomic" + (!type undefined 1830 nil gc_unused "va_gc_atomic" + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + ) + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + nil ) + (!pair "Entity_Id" + (!type undefined 1831 nil gc_unused "Entity_Id" + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + ) + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + nil ) + ) + ) + + (!type already_seen 1831) + + (!type already_seen 1830) + + (!type struct 1832 nil gc_pointed_to "tree_entity_vec_map" + (!srcfileloc "ada/gcc-interface/decl.cc" 164) + (!fields 2 + (!pair "base" + (!type already_seen 974) + (!srcfileloc "ada/gcc-interface/decl.cc" 162) + nil ) + (!pair "to" + (!type already_seen 1829) + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + nil ) + ) + (!options + (!option for_user string "") + ) + 1 nil nil ) + + (!type struct 1833 nil gc_used "dummy_type_hasher" + (!srcfileloc "ada/gcc-interface/decl.cc" 198) + (!fields 0 ) + nil 1 nil nil ) + + (!type user_struct 1834 + (!type 
pointer 1835 nil gc_used + (!type already_seen 1834) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ada/gcc-interface/decl.cc" 198) + (!fields 1 + (!pair "dummy_type_hasher" + (!type already_seen 1833) + (!srcfileloc "ada/gcc-interface/decl.cc" 198) + nil ) + ) + ) + + (!type struct 1836 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/ada/gcc-interface/decl.cc:261"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1837 nil gc_unused "er_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1838 nil gc_unused "vinfo"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 467) + + (!type already_seen 465) + + (!type struct 1839 + (!type pointer 1840 nil gc_used + (!type already_seen 1839) + ) + gc_pointed_to "stmt_group" + (!srcfileloc "ada/gcc-interface/trans.cc" 145) + (!fields 4 + (!pair "previous" + (!type already_seen 1840) + (!srcfileloc "ada/gcc-interface/trans.cc" 141) + nil ) + (!pair "stmt_list" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 142) + nil ) + (!pair "block" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 143) + nil ) + (!pair "cleanups" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 144) + nil ) + ) + (!options + (!option chain_next string "%h.previous") + ) + 1 nil nil ) + + (!type struct 1841 + (!type pointer 1842 nil gc_used + (!type already_seen 1841) + ) + gc_pointed_to "elab_info" + (!srcfileloc "ada/gcc-interface/trans.cc" 161) + (!fields 3 + (!pair "next" + (!type already_seen 1842) + (!srcfileloc "ada/gcc-interface/trans.cc" 158) + nil ) + (!pair "elab_proc" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 159) + nil ) + (!pair "gnat_node" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 160) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 1 nil nil ) + + (!type struct 1843 + (!type pointer 1844 nil gc_used + (!type already_seen 1843) + ) + gc_pointed_to 
"range_check_info_d" + (!srcfileloc "ada/gcc-interface/trans.cc" 193) + (!fields 7 + (!pair "low_bound" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 186) + nil ) + (!pair "high_bound" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 187) + nil ) + (!pair "disp" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 188) + nil ) + (!pair "neg_p" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 189) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 190) + nil ) + (!pair "invariant_cond" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 191) + nil ) + (!pair "inserted_cond" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 192) + nil ) + ) + nil 1 nil nil ) + + (!type user_struct 1845 + (!type pointer 1846 nil gc_used + (!type already_seen 1845) + ) + gc_pointed_to "vec" + (!srcfileloc "ada/gcc-interface/trans.cc" 207) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ada/gcc-interface/trans.cc" 207) + nil ) + (!pair "range_check_info" + (!type already_seen 1844) + (!srcfileloc "ada/gcc-interface/trans.cc" 207) + nil ) + ) + ) + + (!type struct 1847 + (!type pointer 1848 nil gc_used + (!type already_seen 1847) + ) + gc_pointed_to "loop_info_d" + (!srcfileloc "ada/gcc-interface/trans.cc" 209) + (!fields 10 + (!pair "fndecl" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 199) + nil ) + (!pair "stmt" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 200) + nil ) + (!pair "loop_var" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 201) + nil ) + (!pair "low_bound" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 202) + nil ) + (!pair "high_bound" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 203) + nil ) + (!pair "omp_loop_clauses" + (!type already_seen 23) + (!srcfileloc 
"ada/gcc-interface/trans.cc" 204) + nil ) + (!pair "omp_construct_clauses" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 205) + nil ) + (!pair "omp_code" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 206) + nil ) + (!pair "checks" + (!type already_seen 1846) + (!srcfileloc "ada/gcc-interface/trans.cc" 207) + nil ) + (!pair "invariants" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/trans.cc" 208) + nil ) + ) + nil 1 nil nil ) + + (!type user_struct 1849 + (!type pointer 1850 nil gc_used + (!type already_seen 1849) + ) + gc_pointed_to "vec" + (!srcfileloc "ada/gcc-interface/trans.cc" 214) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "ada/gcc-interface/trans.cc" 214) + nil ) + (!pair "loop_info" + (!type already_seen 1848) + (!srcfileloc "ada/gcc-interface/trans.cc" 214) + nil ) + ) + ) + + (!type struct 1851 nil gc_unused "nrv_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1852 + (!type pointer 1853 nil gc_used + (!type already_seen 1852) + ) + gc_pointed_to "gnat_binding_level" + (!srcfileloc "ada/gcc-interface/utils.cc" 263) + (!fields 3 + (!pair "chain" + (!type already_seen 1853) + (!srcfileloc "ada/gcc-interface/utils.cc" 257) + nil ) + (!pair "block" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 259) + nil ) + (!pair "jmpbuf_decl" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 262) + nil ) + ) + (!options + (!option chain_next string "%h.chain") + ) + 1 nil nil ) + + (!type struct 1854 nil gc_pointed_to "packable_type_hash" + (!srcfileloc "ada/gcc-interface/utils.cc" 290) + (!fields 2 + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/utils.cc" 288) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 289) + nil ) + ) + (!options + (!option for_user string "") + ) + 1 nil nil ) + + (!type struct 1855 nil gc_used "packable_type_hasher" + (!srcfileloc 
"ada/gcc-interface/utils.cc" 304) + (!fields 0 ) + nil 1 nil nil ) + + (!type user_struct 1856 + (!type pointer 1857 nil gc_used + (!type already_seen 1856) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ada/gcc-interface/utils.cc" 304) + (!fields 1 + (!pair "packable_type_hasher" + (!type already_seen 1855) + (!srcfileloc "ada/gcc-interface/utils.cc" 304) + nil ) + ) + ) + + (!type struct 1858 nil gc_pointed_to "pad_type_hash" + (!srcfileloc "ada/gcc-interface/utils.cc" 313) + (!fields 2 + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/utils.cc" 311) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 312) + nil ) + ) + (!options + (!option for_user string "") + ) + 1 nil nil ) + + (!type struct 1859 nil gc_used "pad_type_hasher" + (!srcfileloc "ada/gcc-interface/utils.cc" 327) + (!fields 0 ) + nil 1 nil nil ) + + (!type user_struct 1860 + (!type pointer 1861 nil gc_used + (!type already_seen 1860) + ) + gc_pointed_to "hash_table" + (!srcfileloc "ada/gcc-interface/utils.cc" 327) + (!fields 1 + (!pair "pad_type_hasher" + (!type already_seen 1859) + (!srcfileloc "ada/gcc-interface/utils.cc" 327) + nil ) + ) + ) + + (!type struct 1862 nil gc_unused "deferred_decl_context_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1863 nil gc_unused "lang_hooks"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1864 nil gc_unused "c_expr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1865 nil gc_unused "c_typespec"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1866 nil gc_unused "c_declspecs"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1867 nil gc_unused "c_arg_tag"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 462) + + (!type struct 1868 nil gc_unused "c_declarator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1869 nil gc_unused "c_type_name"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1870 nil gc_unused "c_parm"nil + (!fields 0 ) + nil 
0 nil nil ) + + (!type struct 1871 nil gc_unused "c_enum_contents"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 644) + + (!type struct 1872 nil gc_unused "c_struct_parse_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 453) + + (!type already_seen 643) + + (!type already_seen 641) + + (!type already_seen 640) + + (!type already_seen 637) + + (!type already_seen 628) + + (!type already_seen 646) + + (!type already_seen 650) + + (!type already_seen 648) + + (!type struct 1873 + (!type pointer 1874 nil gc_used + (!type already_seen 1873) + ) + gc_pointed_to "c_inline_static" + (!srcfileloc "c/c-decl.cc" 563) + (!fields 5 + (!pair "location" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 550) + nil ) + (!pair "function" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 553) + nil ) + (!pair "static_decl" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 556) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "c/c-decl.cc" 559) + nil ) + (!pair "next" + (!type already_seen 1874) + (!srcfileloc "c/c-decl.cc" 562) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 258 nil nil ) + + (!type already_seen 541) + + (!type struct 1875 nil gc_unused "fname_var_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1876 nil gc_unused "visibility_flags"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1877 nil gc_unused "tlist"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1878 nil gc_unused "tlist_cache"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1879 nil gc_unused "disabled_builtin"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1880 nil gc_unused "nonnull_arg_ctx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 1881 + (!type pointer 1882 nil gc_used + (!type already_seen 1881) + ) + gc_pointed_to "vec" + (!srcfileloc "c-family/c-common.cc" 5851) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "c-family/c-common.cc" 5851) + nil ) + 
(!pair "const_char_p" + (!type already_seen 11) + (!srcfileloc "c-family/c-common.cc" 5851) + nil ) + ) + ) + + (!type user_struct 1883 + (!type pointer 1884 nil gc_used + (!type already_seen 1883) + ) + gc_pointed_to "vec" + (!srcfileloc "c-family/c-common.cc" 8250) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "c-family/c-common.cc" 8250) + nil ) + (!pair "tree_gc_vec" + (!type already_seen 84) + (!srcfileloc "c-family/c-common.cc" 8250) + nil ) + ) + ) + + (!type user_struct 1885 nil gc_unused "hash_set" + (!srcfileloc "c-family/c-common.cc" 9221) + (!fields 2 + (!pair "nofree_string_hash" + (!type undefined 1886 nil gc_unused "nofree_string_hash" + (!srcfileloc "c-family/c-common.cc" 9221) + ) + (!srcfileloc "c-family/c-common.cc" 9221) + nil ) + (!pair "false" + (!type already_seen 872) + (!srcfileloc "c-family/c-common.cc" 9221) + nil ) + ) + ) + + (!type already_seen 1886) + + (!type user_struct 1887 nil gc_unused "hash_map" + (!srcfileloc "c-family/c-common.cc" 9226) + (!fields 1 + (!pair "per_file_includes_t" + (!type pointer 1888 nil gc_unused + (!type struct 1889 + (!type already_seen 1888) + gc_unused "per_file_includes_t"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "c-family/c-common.cc" 9226) + nil ) + ) + ) + + (!type already_seen 1889) + + (!type struct 1890 nil gc_unused "c_common_resword"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 452) + + (!type struct 1891 nil gc_unused "c_fileinfo"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1892 nil gc_unused "substring_loc"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1893 nil gc_unused "bc_state"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type undefined 1894 nil gc_unused "wide_int_bitmask" + (!srcfileloc "c-family/c-common.h" 1193) + ) + + (!type struct 1895 nil gc_unused "c_omp_directive"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 678) + + (!type struct 1896 nil gc_unused "property_attribute_info"nil + (!fields 0 ) + 
nil 0 nil nil ) + + (!type struct 1897 nil gc_used "lazy_hex_fp_value_struct" + (!srcfileloc "c-family/c-cppbuiltin.cc" 1745) + (!fields 4 + (!pair "hex_str" + (!type already_seen 11) + (!srcfileloc "c-family/c-cppbuiltin.cc" 1741) + nil ) + (!pair "mode" + (!type already_seen 2) + (!srcfileloc "c-family/c-cppbuiltin.cc" 1742) + nil ) + (!pair "digits" + (!type already_seen 2) + (!srcfileloc "c-family/c-cppbuiltin.cc" 1743) + nil ) + (!pair "fp_suffix" + (!type already_seen 11) + (!srcfileloc "c-family/c-cppbuiltin.cc" 1744) + nil ) + ) + nil 774 nil nil ) + + (!type union 1898 nil gc_unused "gen_pragma_handler"nil + (!fields 0 ) + nil 0 nil ) + + (!type struct 1899 nil gc_unused "internal_pragma_handler"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1900 + (!type pointer 1901 nil gc_used + (!type already_seen 1900) + ) + gc_pointed_to "align_stack" + (!srcfileloc "c-family/c-pragma.cc" 51) + (!fields 3 + (!pair "alignment" + (!type already_seen 2) + (!srcfileloc "c-family/c-pragma.cc" 48) + nil ) + (!pair "id" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 49) + nil ) + (!pair "prev" + (!type already_seen 1901) + (!srcfileloc "c-family/c-pragma.cc" 50) + nil ) + ) + nil 774 nil nil ) + + (!type struct 1902 nil gc_used "pending_weak" + (!srcfileloc "c-family/c-pragma.cc" 248) + (!fields 2 + (!pair "name" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 243) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 244) + nil ) + ) + nil 774 nil nil ) + + (!type user_struct 1903 + (!type pointer 1904 nil gc_used + (!type already_seen 1903) + ) + gc_pointed_to "vec" + (!srcfileloc "c-family/c-pragma.cc" 248) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "c-family/c-pragma.cc" 248) + nil ) + (!pair "pending_weak" + (!type already_seen 1902) + (!srcfileloc "c-family/c-pragma.cc" 248) + nil ) + ) + ) + + (!type struct 1905 nil gc_used "pending_redefinition" + (!srcfileloc 
"c-family/c-pragma.cc" 496) + (!fields 2 + (!pair "oldname" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 491) + nil ) + (!pair "newname" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 492) + nil ) + ) + nil 774 nil nil ) + + (!type user_struct 1906 + (!type pointer 1907 nil gc_used + (!type already_seen 1906) + ) + gc_pointed_to "vec" + (!srcfileloc "c-family/c-pragma.cc" 496) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "c-family/c-pragma.cc" 496) + nil ) + (!pair "pending_redefinition" + (!type already_seen 1905) + (!srcfileloc "c-family/c-pragma.cc" 496) + nil ) + ) + ) + + (!type struct 1908 + (!type pointer 1909 nil gc_used + (!type already_seen 1908) + ) + gc_pointed_to "opt_stack" + (!srcfileloc "c-family/c-pragma.cc" 1052) + (!fields 6 + (!pair "prev" + (!type already_seen 1909) + (!srcfileloc "c-family/c-pragma.cc" 1046) + nil ) + (!pair "target_binary" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 1047) + nil ) + (!pair "target_strings" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 1048) + nil ) + (!pair "optimize_binary" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 1049) + nil ) + (!pair "optimize_strings" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.cc" 1050) + nil ) + (!pair "saved_global_options" + (!type already_seen 834) + (!srcfileloc "c-family/c-pragma.cc" 1051) + (!options + (!option skip string "") + ) + ) + ) + nil 774 nil nil ) + + (!type struct 1910 nil gc_unused "pragma_ns_name"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1911 nil gc_unused "omp_pragma_def"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1912 nil gc_unused "function_format_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1913 nil gc_unused "format_wanted_type"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1914 nil gc_unused "format_check_results"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1915 nil 
gc_unused "format_check_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1916 nil gc_unused "flag_chars_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1917 nil gc_unused "length_modifier"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1918 nil gc_unused "argument_parser"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1919 nil gc_unused "baltoks_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1920 nil gc_unused "token_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1921 nil gc_unused "indirection_suffix"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1922 nil gc_unused "range_label_for_format_type_mismatch"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1923 + (!type pointer 1924 nil gc_unused + (!type already_seen 1923) + ) + gc_used "c_token" + (!srcfileloc "c/c-parser.cc" 175) + (!fields 7 + (!pair "type" + (!type already_seen 2) + (!srcfileloc "c/c-parser.h" 55) + nil ) + (!pair "id_kind" + (!type already_seen 2) + (!srcfileloc "c/c-parser.h" 58) + nil ) + (!pair "keyword" + (!type already_seen 2) + (!srcfileloc "c/c-parser.h" 61) + nil ) + (!pair "pragma_kind" + (!type already_seen 2) + (!srcfileloc "c/c-parser.h" 64) + nil ) + (!pair "location" + (!type already_seen 2) + (!srcfileloc "c/c-parser.h" 66) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "c/c-parser.h" 68) + nil ) + (!pair "flags" + (!type already_seen 8) + (!srcfileloc "c/c-parser.h" 70) + nil ) + ) + nil 258 nil nil ) + + (!type struct 1925 + (!type pointer 1926 nil gc_used + (!type already_seen 1925) + ) + gc_pointed_to "c_parser" + (!srcfileloc "c/c-parser.cc" 267) + (!fields 17 + (!pair "tokens" + (!type already_seen 1924) + (!srcfileloc "c/c-parser.cc" 175) + (!options + (!option skip string "") + ) + ) + (!pair "tokens_buf" + (!type array 1927 nil gc_used "4" + (!type already_seen 1923) + ) + (!srcfileloc "c/c-parser.cc" 177) + nil ) + (!pair "tokens_avail" + (!type already_seen 2) + (!srcfileloc 
"c/c-parser.cc" 180) + nil ) + (!pair "raw_tokens" + (!type pointer 1928 nil gc_used + (!type user_struct 1929 + (!type already_seen 1928) + gc_pointed_to "vec" + (!srcfileloc "c/c-parser.cc" 183) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "c/c-parser.cc" 183) + nil ) + (!pair "c_token" + (!type already_seen 1923) + (!srcfileloc "c/c-parser.cc" 183) + nil ) + ) + ) + ) + (!srcfileloc "c/c-parser.cc" 183) + nil ) + (!pair "raw_tokens_used" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 186) + nil ) + (!pair "error" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 190) + nil ) + (!pair "in_pragma" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 193) + nil ) + (!pair "in_if_block" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 195) + nil ) + (!pair "lex_joined_string" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 200) + nil ) + (!pair "translate_strings_p" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 204) + nil ) + (!pair "objc_pq_context" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 210) + nil ) + (!pair "objc_could_be_foreach_context" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 215) + nil ) + (!pair "objc_need_raw_identifier" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 220) + nil ) + (!pair "in_transaction" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 223) + nil ) + (!pair "objc_property_attr_context" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 226) + nil ) + (!pair "seen_string_literal" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 233) + nil ) + (!pair "last_token_location" + (!type already_seen 2) + (!srcfileloc "c/c-parser.cc" 236) + nil ) + ) + nil 258 nil nil ) + + (!type already_seen 1929) + + (!type struct 1930 nil gc_unused "token_pair"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1931 nil gc_unused "matching_paren_traits" + (!srcfileloc "c/c-parser.cc" 1105) + (!fields 0 ) + nil 258 nil nil ) + + 
(!type user_struct 1932 nil gc_unused "token_pair" + (!srcfileloc "c/c-parser.cc" 1105) + (!fields 1 + (!pair "matching_paren_traits" + (!type already_seen 1931) + (!srcfileloc "c/c-parser.cc" 1105) + nil ) + ) + ) + + (!type struct 1933 nil gc_unused "matching_brace_traits" + (!srcfileloc "c/c-parser.cc" 1123) + (!fields 0 ) + nil 258 nil nil ) + + (!type user_struct 1934 nil gc_unused "token_pair" + (!srcfileloc "c/c-parser.cc" 1123) + (!fields 1 + (!pair "matching_brace_traits" + (!type already_seen 1933) + (!srcfileloc "c/c-parser.cc" 1123) + nil ) + ) + ) + + (!type struct 1935 nil gc_unused "oacc_routine_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1936 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/c/c-parser.cc:7795"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1937 nil gc_unused "c_generic_association"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1938 nil gc_unused "omp_dim"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 460) + + (!type already_seen 83) + + (!type already_seen 90) + + (!type struct 1939 nil gc_used "cxx_saved_binding" + (!srcfileloc "cp/cp-tree.h" 1822) + (!fields 3 + (!pair "identifier" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 65) + nil ) + (!pair "binding" + (!type already_seen 89) + (!srcfileloc "cp/name-lookup.h" 67) + nil ) + (!pair "real_type_value" + (!type already_seen 23) + (!srcfileloc "cp/name-lookup.h" 68) + nil ) + ) + nil 516 nil nil ) + + (!type already_seen 667) + + (!type already_seen 666) + + (!type already_seen 664) + + (!type already_seen 662) + + (!type already_seen 660) + + (!type already_seen 88) + + (!type already_seen 87) + + (!type struct 1940 nil gc_unused "cp_expr"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 656) + + (!type already_seen 657) + + (!type already_seen 659) + + (!type struct 1941 nil gc_unused "ovl_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1942 nil gc_unused 
"ovl_range"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1943 nil gc_unused "lkp_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1944 nil gc_unused "lkp_range"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 93) + + (!type struct 1945 nil gc_unused "releasing_vec"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 669) + + (!type already_seen 668) + + (!type already_seen 71) + + (!type already_seen 670) + + (!type already_seen 671) + + (!type already_seen 672) + + (!type already_seen 673) + + (!type already_seen 674) + + (!type already_seen 675) + + (!type already_seen 80) + + (!type already_seen 78) + + (!type already_seen 676) + + (!type already_seen 677) + + (!type struct 1946 nil gc_used "omp_declare_target_attr" + (!srcfileloc "cp/cp-tree.h" 1865) + (!fields 1 + (!pair "attr_syntax" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1816) + nil ) + ) + nil 516 nil nil ) + + (!type user_struct 1947 + (!type pointer 1948 nil gc_used + (!type already_seen 1947) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/cp-tree.h" 1822) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/cp-tree.h" 1822) + nil ) + (!pair "cxx_saved_binding" + (!type already_seen 1939) + (!srcfileloc "cp/cp-tree.h" 1822) + nil ) + ) + ) + + (!type user_struct 1949 + (!type pointer 1950 nil gc_used + (!type already_seen 1949) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/cp-tree.h" 1865) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/cp-tree.h" 1865) + nil ) + (!pair "omp_declare_target_attr" + (!type already_seen 1946) + (!srcfileloc "cp/cp-tree.h" 1865) + nil ) + ) + ) + + (!type struct 1951 + (!type pointer 1952 nil gc_used + (!type already_seen 1951) + ) + gc_pointed_to "saved_scope" + (!srcfileloc "cp/cp-tree.h" 1868) + (!fields 32 + (!pair "old_bindings" + (!type already_seen 1948) + (!srcfileloc "cp/cp-tree.h" 1822) + nil ) + (!pair "old_namespace" + (!type already_seen 23) + (!srcfileloc 
"cp/cp-tree.h" 1823) + nil ) + (!pair "decl_ns_list" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 1824) + nil ) + (!pair "class_name" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1825) + nil ) + (!pair "class_type" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1826) + nil ) + (!pair "access_specifier" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1827) + nil ) + (!pair "function_decl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1828) + nil ) + (!pair "lang_base" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 1829) + nil ) + (!pair "lang_name" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1830) + nil ) + (!pair "template_parms" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1831) + nil ) + (!pair "x_previous_class_level" + (!type already_seen 82) + (!srcfileloc "cp/cp-tree.h" 1832) + nil ) + (!pair "x_saved_tree" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1833) + nil ) + (!pair "x_current_class_ptr" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1836) + nil ) + (!pair "x_current_class_ref" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 1837) + nil ) + (!pair "x_processing_template_decl" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1839) + nil ) + (!pair "x_processing_specialization" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1840) + nil ) + (!pair "x_processing_constraint" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1841) + nil ) + (!pair "suppress_location_wrappers" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1842) + nil ) + (!pair "x_processing_explicit_instantiation" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1843) + nil ) + (!pair "need_pop_function_context" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1844) + nil ) + (!pair "discarded_stmt" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1848) + nil ) + (!pair "consteval_if_p" + (!type already_seen 2) + (!srcfileloc 
"cp/cp-tree.h" 1852) + nil ) + (!pair "unevaluated_operand" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1854) + nil ) + (!pair "inhibit_evaluation_warnings" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1855) + nil ) + (!pair "noexcept_operand" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1856) + nil ) + (!pair "ref_temp_count" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 1857) + nil ) + (!pair "x_stmt_tree" + (!type already_seen 453) + (!srcfileloc "cp/cp-tree.h" 1859) + nil ) + (!pair "class_bindings" + (!type already_seen 82) + (!srcfileloc "cp/cp-tree.h" 1861) + nil ) + (!pair "bindings" + (!type already_seen 82) + (!srcfileloc "cp/cp-tree.h" 1862) + nil ) + (!pair "x_local_specializations" + (!type already_seen 393) + (!srcfileloc "cp/cp-tree.h" 1864) + (!options + (!option skip string "") + ) + ) + (!pair "omp_declare_target_attribute" + (!type already_seen 1950) + (!srcfileloc "cp/cp-tree.h" 1865) + nil ) + (!pair "prev" + (!type already_seen 1952) + (!srcfileloc "cp/cp-tree.h" 1867) + nil ) + ) + nil 516 nil nil ) + + (!type struct 1953 nil gc_unused "processing_template_decl_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1954 nil gc_unused "warning_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1955 nil gc_unused "iloc_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1956 nil gc_unused "temp_override"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1957 nil gc_unused "type_identity"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1958 + (!type pointer 1959 nil gc_used + (!type already_seen 1958) + ) + gc_pointed_to "named_label_entry" + (!srcfileloc "cp/decl.cc" 228) + (!fields 13 + (!pair "name" + (!type already_seen 23) + (!srcfileloc "cp/decl.cc" 196) + nil ) + (!pair "label_decl" + (!type already_seen 23) + (!srcfileloc "cp/decl.cc" 198) + nil ) + (!pair "outer" + (!type already_seen 1959) + (!srcfileloc "cp/decl.cc" 200) + nil ) + (!pair 
"binding_level" + (!type already_seen 82) + (!srcfileloc "cp/decl.cc" 205) + nil ) + (!pair "names_in_scope" + (!type already_seen 23) + (!srcfileloc "cp/decl.cc" 210) + nil ) + (!pair "bad_decls" + (!type already_seen 84) + (!srcfileloc "cp/decl.cc" 214) + nil ) + (!pair "uses" + (!type pointer 1960 nil gc_used + (!type struct 1961 + (!type already_seen 1960) + gc_pointed_to "named_label_use_entry" + (!srcfileloc "cp/decl.cc" 217) + (!fields 5 + (!pair "next" + (!type already_seen 1960) + (!srcfileloc "cp/decl.cc" 173) + nil ) + (!pair "binding_level" + (!type already_seen 82) + (!srcfileloc "cp/decl.cc" 177) + nil ) + (!pair "names_in_scope" + (!type already_seen 23) + (!srcfileloc "cp/decl.cc" 181) + nil ) + (!pair "o_goto_locus" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 183) + nil ) + (!pair "in_omp_scope" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 187) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 516 nil nil ) + ) + (!srcfileloc "cp/decl.cc" 217) + nil ) + (!pair "in_try_scope" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 222) + nil ) + (!pair "in_catch_scope" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 223) + nil ) + (!pair "in_omp_scope" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 224) + nil ) + (!pair "in_transaction_scope" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 225) + nil ) + (!pair "in_constexpr_if" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 226) + nil ) + (!pair "in_consteval_if" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 227) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type already_seen 457) + + (!type already_seen 456) + + (!type already_seen 537) + + (!type already_seen 536) + + (!type already_seen 65) + + (!type already_seen 66) + + (!type already_seen 68) + + (!type already_seen 69) + + (!type already_seen 67) + + (!type already_seen 92) + + (!type already_seen 81) + + (!type already_seen 94) + + (!type 
already_seen 95) + + (!type already_seen 64) + + (!type struct 1962 nil gc_unused "aggr_init_expr_arg_iterator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1963 nil gc_unused "cp_unevaluated"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1964 nil gc_unused "cp_evaluated"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1965 nil gc_unused "local_specialization_stack"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1966 nil gc_pointed_to "spec_entry" + (!srcfileloc "cp/cp-tree.h" 5700) + (!fields 3 + (!pair "tmpl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 5697) + nil ) + (!pair "args" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 5698) + nil ) + (!pair "spec" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 5699) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 1967 nil gc_used "ovl_op_info_t" + (!srcfileloc "cp/cp-tree.h" 6121) + (!fields 6 + (!pair "identifier" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 6107) + nil ) + (!pair "name" + (!type already_seen 11) + (!srcfileloc "cp/cp-tree.h" 6109) + nil ) + (!pair "mangled_name" + (!type already_seen 11) + (!srcfileloc "cp/cp-tree.h" 6111) + nil ) + (!pair "tree_code" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6113) + nil ) + (!pair "ovl_op_code" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6115) + nil ) + (!pair "flags" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6117) + nil ) + ) + nil 516 nil nil ) + + (!type struct 1968 nil gc_unused "cp_decl_specifier_seq"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1969 nil gc_unused "cp_declarator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1970 nil gc_unused "cp_parameter_declarator"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1971 + (!type pointer 1972 nil gc_used + (!type already_seen 1971) + ) + gc_pointed_to "tinst_level" + (!srcfileloc "cp/pt.cc" 9590) + (!fields 8 + (!pair "next" + (!type 
already_seen 1972) + (!srcfileloc "cp/cp-tree.h" 6403) + nil ) + (!pair "tldcl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 6415) + nil ) + (!pair "targs" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 6415) + nil ) + (!pair "path" + (!type already_seen 386) + (!srcfileloc "cp/cp-tree.h" 6421) + nil ) + (!pair "visible" + (!type already_seen 386) + (!srcfileloc "cp/cp-tree.h" 6422) + nil ) + (!pair "locus" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6467) + nil ) + (!pair "errors" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6470) + nil ) + (!pair "refcount" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6478) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 516 nil nil ) + + (!type struct 1973 nil gc_unused "access_failure_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1974 + (!type pointer 1975 nil gc_used + (!type already_seen 1974) + ) + gc_pointed_to "module_state" + (!srcfileloc "cp/module.cc" 3642) + (!fields 28 + (!pair "imports" + (!type already_seen 386) + (!srcfileloc "cp/module.cc" 3394) + nil ) + (!pair "exports" + (!type already_seen 386) + (!srcfileloc "cp/module.cc" 3395) + nil ) + (!pair "parent" + (!type already_seen 1975) + (!srcfileloc "cp/module.cc" 3397) + nil ) + (!pair "name" + (!type already_seen 23) + (!srcfileloc "cp/module.cc" 3398) + nil ) + (!pair "slurp" + (!type pointer 1976 nil gc_used + (!type struct 1977 + (!type already_seen 1976) + gc_pointed_to "slurping" + (!srcfileloc "cp/module.cc" 3329) + (!fields 9 + (!pair "remap" + (!type pointer 1978 nil gc_unused + (!type user_struct 1979 + (!type already_seen 1978) + gc_unused "vec" + (!srcfileloc "cp/module.cc" 3261) + (!fields 3 + (!pair "vl_embed" + (!type undefined 1980 nil gc_unused "vl_embed" + (!srcfileloc "cp/module.cc" 3261) + ) + (!srcfileloc "cp/module.cc" 3261) + nil ) + (!pair "va_heap" + (!type already_seen 827) + (!srcfileloc "cp/module.cc" 3261) + nil ) + (!pair "unsigned" + (!type 
already_seen 1404) + (!srcfileloc "cp/module.cc" 3261) + nil ) + ) + ) + ) + (!srcfileloc "cp/module.cc" 3262) + (!options + (!option skip string "") + ) + ) + (!pair "from" + (!type pointer 1981 nil gc_unused + (!type struct 1982 + (!type already_seen 1981) + gc_unused "elf_in" + (!srcfileloc "cp/module.cc" 3264) + (!fields 0 ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/module.cc" 3264) + (!options + (!option skip string "") + ) + ) + (!pair "headers" + (!type already_seen 386) + (!srcfileloc "cp/module.cc" 3268) + nil ) + (!pair "macro_defs" + (!type struct 1983 nil gc_used "bytes_in" + (!srcfileloc "cp/module.cc" 3275) + (!fields 0 ) + nil 516 nil nil ) + (!srcfileloc "cp/module.cc" 3275) + nil ) + (!pair "macro_tbl" + (!type already_seen 1983) + (!srcfileloc "cp/module.cc" 3276) + nil ) + (!pair "loc_deltas" + (!type user_struct 1984 nil gc_unused "std::pair" + (!srcfileloc "cp/module.cc" 3163) + (!fields 2 + (!pair "unsigned" + (!type already_seen 1404) + (!srcfileloc "cp/module.cc" 3163) + nil ) + (!pair "unsigned" + (!type already_seen 1404) + (!srcfileloc "cp/module.cc" 3163) + nil ) + ) + ) + (!srcfileloc "cp/module.cc" 3279) + (!options + (!option skip string "") + ) + ) + (!pair "current" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3281) + nil ) + (!pair "remaining" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3282) + nil ) + (!pair "lru" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3283) + nil ) + ) + nil 516 nil nil ) + ) + (!srcfileloc "cp/module.cc" 3400) + nil ) + (!pair "flatname" + (!type already_seen 11) + (!srcfileloc "cp/module.cc" 3402) + nil ) + (!pair "filename" + (!type already_seen 11) + (!srcfileloc "cp/module.cc" 3403) + nil ) + (!pair "entity_lwm" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3406) + nil ) + (!pair "entity_num" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3407) + nil ) + (!pair "ordinary_locs" + (!type user_struct 1985 nil gc_unused "std::pair" + (!srcfileloc 
"cp/module.cc" 3166) + (!fields 2 + (!pair "location_t" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3166) + nil ) + (!pair "location_t" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3166) + nil ) + ) + ) + (!srcfileloc "cp/module.cc" 3411) + (!options + (!option skip string "") + ) + ) + (!pair "macro_locs" + (!type already_seen 1985) + (!srcfileloc "cp/module.cc" 3412) + (!options + (!option skip string "") + ) + ) + (!pair "loc" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3417) + nil ) + (!pair "crc" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3418) + nil ) + (!pair "mod" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3420) + nil ) + (!pair "remap" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3421) + nil ) + (!pair "subst" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3423) + nil ) + (!pair "loadedness" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3426) + nil ) + (!pair "module_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3428) + nil ) + (!pair "header_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3429) + nil ) + (!pair "interface_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3430) + nil ) + (!pair "partition_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3431) + nil ) + (!pair "directness" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3434) + nil ) + (!pair "exported_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3436) + nil ) + (!pair "cmi_noted_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3437) + nil ) + (!pair "call_init_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3439) + nil ) + (!pair "inform_cmi_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3441) + nil ) + (!pair "visited_p" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3442) + nil ) + (!pair "extensions" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 3444) + nil ) + ) + (!options + (!option for_user string 
"") + (!option chain_next string "%h.parent") + ) + 516 nil nil ) + + (!type struct 1986 nil gc_unused "conversion"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1987 nil gc_unused "deferring_access_check_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1988 nil gc_unused "diagnostic_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1989 nil gc_unused "diagnosing_failed_constraint"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1990 nil gc_unused "processing_constraint_expression_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1991 nil gc_used "atom_hasher" + (!srcfileloc "cp/constraint.cc" 745) + (!fields 0 ) + nil 516 nil nil ) + + (!type struct 1992 + (!type pointer 1993 nil gc_used + (!type already_seen 1992) + ) + gc_pointed_to "constexpr_fundef" + (!srcfileloc "cp/constexpr.cc" 1058) + (!fields 4 + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 8397) + nil ) + (!pair "body" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 8398) + nil ) + (!pair "parms" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 8399) + nil ) + (!pair "result" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 8400) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 1994 nil gc_unused "uid_sensitive_constexpr_evaluation_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1995 nil gc_unused "uid_sensitive_constexpr_evaluation_checker"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 1996 nil gc_unused "push_access_scope_guard"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 76) + + (!type already_seen 74) + + (!type already_seen 73) + + (!type user_struct 1997 + (!type pointer 1998 nil gc_used + (!type already_seen 1997) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/parser.h" 87) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/parser.h" 87) + nil ) + (!pair "cp_token" + (!type already_seen 73) + 
(!srcfileloc "cp/parser.h" 87) + nil ) + ) + ) + + (!type user_struct 1999 nil gc_unused "vec" + (!srcfileloc "cp/parser.h" 101) + (!fields 1 + (!pair "cp_token_position" + (!type already_seen 72) + (!srcfileloc "cp/parser.h" 101) + nil ) + ) + ) + + (!type struct 2000 + (!type pointer 2001 nil gc_used + (!type already_seen 2000) + ) + gc_pointed_to "cp_lexer" + (!srcfileloc "cp/parser.h" 233) + (!fields 11 + (!pair "buffer" + (!type already_seen 1998) + (!srcfileloc "cp/parser.h" 87) + nil ) + (!pair "last_token" + (!type already_seen 72) + (!srcfileloc "cp/parser.h" 91) + (!options + (!option skip string "") + ) + ) + (!pair "next_token" + (!type already_seen 72) + (!srcfileloc "cp/parser.h" 95) + (!options + (!option skip string "") + ) + ) + (!pair "saved_tokens" + (!type already_seen 1999) + (!srcfileloc "cp/parser.h" 101) + (!options + (!option skip string "") + ) + ) + (!pair "saved_type" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 104) + nil ) + (!pair "saved_keyword" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 105) + nil ) + (!pair "next" + (!type already_seen 2001) + (!srcfileloc "cp/parser.h" 108) + nil ) + (!pair "debugging_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 111) + nil ) + (!pair "in_pragma" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 115) + nil ) + (!pair "in_omp_attribute_pragma" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 119) + nil ) + (!pair "orphan_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 123) + nil ) + ) + nil 516 nil nil ) + + (!type struct 2002 nil gc_unused "cp_token_ident"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2003 nil gc_used "cp_default_arg_entry" + (!srcfileloc "cp/parser.h" 169) + (!fields 2 + (!pair "class_type" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 157) + nil ) + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 160) + nil ) + ) + nil 516 nil nil ) + + (!type user_struct 2004 + (!type pointer 2005 nil gc_used 
+ (!type already_seen 2004) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/parser.h" 169) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/parser.h" 169) + nil ) + (!pair "cp_default_arg_entry" + (!type already_seen 2003) + (!srcfileloc "cp/parser.h" 169) + nil ) + ) + ) + + (!type struct 2006 nil gc_used "cp_unparsed_functions_entry" + (!srcfileloc "cp/parser.h" 377) + (!fields 4 + (!pair "funs_with_default_args" + (!type already_seen 2005) + (!srcfileloc "cp/parser.h" 169) + nil ) + (!pair "funs_with_definitions" + (!type already_seen 84) + (!srcfileloc "cp/parser.h" 173) + nil ) + (!pair "nsdmis" + (!type already_seen 84) + (!srcfileloc "cp/parser.h" 177) + nil ) + (!pair "noexcepts" + (!type already_seen 84) + (!srcfileloc "cp/parser.h" 180) + nil ) + ) + nil 516 nil nil ) + + (!type struct 2007 + (!type pointer 2008 nil gc_used + (!type already_seen 2007) + ) + gc_pointed_to "cp_parser_context" + (!srcfileloc "cp/parser.h" 267) + (!fields 3 + (!pair "status" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 202) + nil ) + (!pair "object_type" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 207) + nil ) + (!pair "next" + (!type already_seen 2008) + (!srcfileloc "cp/parser.h" 210) + nil ) + ) + nil 516 nil nil ) + + (!type struct 2009 + (!type pointer 2010 nil gc_unused + (!type already_seen 2009) + ) + gc_unused "cp_omp_declare_simd_data" + (!srcfileloc "cp/parser.h" 389) + (!fields 0 ) + nil 516 nil nil ) + + (!type struct 2011 + (!type pointer 2012 nil gc_unused + (!type already_seen 2011) + ) + gc_unused "cp_oacc_routine_data" + (!srcfileloc "cp/parser.h" 393) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2013 + (!type pointer 2014 nil gc_used + (!type already_seen 2013) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/parser.h" 377) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/parser.h" 377) + nil ) + (!pair "cp_unparsed_functions_entry" + (!type already_seen 2006) + (!srcfileloc 
"cp/parser.h" 377) + nil ) + ) + ) + + (!type struct 2015 + (!type pointer 2016 nil gc_used + (!type already_seen 2015) + ) + gc_pointed_to "cp_parser" + (!srcfileloc "cp/parser.cc" 47576) + (!fields 39 + (!pair "lexer" + (!type already_seen 2001) + (!srcfileloc "cp/parser.h" 233) + nil ) + (!pair "scope" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 248) + nil ) + (!pair "object_scope" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 255) + nil ) + (!pair "qualifying_scope" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 256) + nil ) + (!pair "context" + (!type already_seen 2008) + (!srcfileloc "cp/parser.h" 267) + nil ) + (!pair "allow_gnu_extensions_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 271) + nil ) + (!pair "greater_than_is_operator_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 278) + nil ) + (!pair "default_arg_ok_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 283) + nil ) + (!pair "integral_constant_expression_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 287) + nil ) + (!pair "allow_non_integral_constant_expression_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 293) + nil ) + (!pair "non_integral_constant_expression_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 297) + nil ) + (!pair "local_variables_forbidden_p" + (!type already_seen 8) + (!srcfileloc "cp/parser.h" 304) + nil ) + (!pair "in_unbraced_linkage_specification_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 309) + nil ) + (!pair "in_declarator_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 313) + nil ) + (!pair "in_template_argument_list_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 316) + nil ) + (!pair "in_statement" + (!type already_seen 8) + (!srcfileloc "cp/parser.h" 328) + nil ) + (!pair "in_switch_statement_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 334) + nil ) + (!pair "in_type_id_in_expr_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 339) + 
nil ) + (!pair "translate_strings_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 343) + nil ) + (!pair "in_function_body" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 347) + nil ) + (!pair "in_transaction" + (!type already_seen 8) + (!srcfileloc "cp/parser.h" 351) + nil ) + (!pair "colon_corrects_to_scope_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 354) + nil ) + (!pair "colon_doesnt_start_class_def_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 360) + nil ) + (!pair "objective_c_message_context_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 364) + nil ) + (!pair "type_definition_forbidden_message" + (!type already_seen 11) + (!srcfileloc "cp/parser.h" 369) + nil ) + (!pair "type_definition_forbidden_message_arg" + (!type already_seen 11) + (!srcfileloc "cp/parser.h" 372) + nil ) + (!pair "unparsed_queues" + (!type already_seen 2014) + (!srcfileloc "cp/parser.h" 377) + nil ) + (!pair "num_classes_being_defined" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 381) + nil ) + (!pair "num_template_parameter_lists" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 385) + nil ) + (!pair "omp_declare_simd" + (!type already_seen 2010) + (!srcfileloc "cp/parser.h" 389) + (!options + (!option skip string "") + ) + ) + (!pair "oacc_routine" + (!type already_seen 2012) + (!srcfileloc "cp/parser.h" 393) + (!options + (!option skip string "") + ) + ) + (!pair "auto_is_implicit_function_template_parm_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 397) + nil ) + (!pair "fully_implicit_function_template_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 402) + nil ) + (!pair "omp_attrs_forbidden_p" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 405) + nil ) + (!pair "implicit_template_parms" + (!type already_seen 23) + (!srcfileloc "cp/parser.h" 413) + nil ) + (!pair "implicit_template_scope" + (!type already_seen 82) + (!srcfileloc "cp/parser.h" 420) + nil ) + (!pair "in_result_type_constraint_p" 
+ (!type already_seen 2) + (!srcfileloc "cp/parser.h" 425) + nil ) + (!pair "prevent_constrained_type_specifiers" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 429) + nil ) + (!pair "innermost_linkage_specification_location" + (!type already_seen 2) + (!srcfileloc "cp/parser.h" 433) + nil ) + ) + nil 516 nil nil ) + + (!type struct 2017 nil gc_unused "rejection_reason"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2018 nil gc_unused "z_candidate"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2019 nil gc_unused "candidate_warning"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2020 nil gc_unused "conversion_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2021 nil gc_unused "dealloc_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2022 nil gc_unused "in_consteval_if_p_temp_override"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2023 nil gc_unused "NonPublicField"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2024 nil gc_unused "NonTrivialField"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2025 + (!type pointer 2026 nil gc_unused + (!type already_seen 2025) + ) + gc_unused "class_stack_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2027 nil gc_unused "vtbl_init_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2028 nil gc_unused "flexmems_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2029 nil gc_unused "abi_tag_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2030 nil gc_unused "find_final_overrider_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2031 + (!type pointer 2032 nil gc_used + (!type already_seen 2031) + ) + gc_pointed_to "hash_map" + (!srcfileloc "cp/class.cc" 3392) + (!fields 2 + (!pair "int" + (!type already_seen 373) + (!srcfileloc "cp/class.cc" 3392) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "cp/class.cc" 3392) + nil ) + ) + ) + + (!type struct 2033 nil gc_unused 
"secondary_vptr_vtt_init_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2034 nil gc_used "constexpr_fundef_hasher" + (!srcfileloc "cp/constexpr.cc" 151) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2035 + (!type pointer 2036 nil gc_used + (!type already_seen 2035) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/constexpr.cc" 151) + (!fields 1 + (!pair "constexpr_fundef_hasher" + (!type already_seen 2034) + (!srcfileloc "cp/constexpr.cc" 151) + nil ) + ) + ) + + (!type struct 2037 nil gc_pointed_to "constexpr_call" + (!srcfileloc "cp/constexpr.cc" 1071) + (!fields 5 + (!pair "fundef" + (!type already_seen 1993) + (!srcfileloc "cp/constexpr.cc" 1058) + nil ) + (!pair "bindings" + (!type already_seen 23) + (!srcfileloc "cp/constexpr.cc" 1060) + nil ) + (!pair "result" + (!type already_seen 23) + (!srcfileloc "cp/constexpr.cc" 1065) + nil ) + (!pair "hash" + (!type already_seen 2) + (!srcfileloc "cp/constexpr.cc" 1068) + nil ) + (!pair "manifestly_const_eval" + (!type already_seen 2) + (!srcfileloc "cp/constexpr.cc" 1070) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 2038 nil gc_used "constexpr_call_hasher" + (!srcfileloc "cp/constexpr.cc" 1212) + (!fields 0 ) + nil 516 nil nil ) + + (!type struct 2039 nil gc_unused "constexpr_global_ctx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2040 nil gc_unused "constexpr_ctx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2041 + (!type pointer 2042 nil gc_used + (!type already_seen 2041) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/constexpr.cc" 1212) + (!fields 1 + (!pair "constexpr_call_hasher" + (!type already_seen 2038) + (!srcfileloc "cp/constexpr.cc" 1212) + nil ) + ) + ) + + (!type struct 2043 nil gc_unused "replace_decl_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2044 nil gc_unused "free_bindings"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2045 nil gc_unused 
"check_for_return_continue_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2046 nil gc_unused "subst_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2047 nil gc_unused "sat_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2048 nil gc_unused "norm_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2049 + (!type pointer 2050 nil gc_used + (!type already_seen 2049) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/constraint.cc" 745) + (!fields 1 + (!pair "atom_hasher" + (!type already_seen 1991) + (!srcfileloc "cp/constraint.cc" 745) + nil ) + ) + ) + + (!type struct 2051 nil gc_pointed_to "sat_entry" + (!srcfileloc "cp/constraint.cc" 2458) + (!fields 8 + (!pair "atom" + (!type already_seen 23) + (!srcfileloc "cp/constraint.cc" 2428) + nil ) + (!pair "args" + (!type already_seen 23) + (!srcfileloc "cp/constraint.cc" 2431) + nil ) + (!pair "result" + (!type already_seen 23) + (!srcfileloc "cp/constraint.cc" 2438) + nil ) + (!pair "location" + (!type already_seen 2) + (!srcfileloc "cp/constraint.cc" 2442) + nil ) + (!pair "ftc_begin" + (!type already_seen 2) + (!srcfileloc "cp/constraint.cc" 2447) + nil ) + (!pair "ftc_end" + (!type already_seen 2) + (!srcfileloc "cp/constraint.cc" 2447) + nil ) + (!pair "diagnose_instability" + (!type already_seen 2) + (!srcfileloc "cp/constraint.cc" 2452) + nil ) + (!pair "evaluating" + (!type already_seen 2) + (!srcfileloc "cp/constraint.cc" 2457) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 2052 nil gc_used "sat_hasher" + (!srcfileloc "cp/constraint.cc" 2530) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2053 + (!type pointer 2054 nil gc_used + (!type already_seen 2053) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/constraint.cc" 2530) + (!fields 1 + (!pair "sat_hasher" + (!type already_seen 2052) + (!srcfileloc "cp/constraint.cc" 2530) + nil ) + ) + ) + + (!type struct 2055 nil gc_unused "satisfaction_cache"nil + 
(!fields 0 ) + nil 0 nil nil ) + + (!type struct 2056 nil gc_pointed_to "coroutine_info" + (!srcfileloc "cp/coroutines.cc" 100) + (!fields 12 + (!pair "function_decl" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 84) + nil ) + (!pair "actor_decl" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 85) + nil ) + (!pair "destroy_decl" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 86) + nil ) + (!pair "promise_type" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 87) + nil ) + (!pair "handle_type" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 88) + nil ) + (!pair "self_h_proxy" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 89) + nil ) + (!pair "promise_proxy" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 92) + nil ) + (!pair "return_void" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 93) + nil ) + (!pair "first_coro_keyword" + (!type already_seen 2) + (!srcfileloc "cp/coroutines.cc" 94) + nil ) + (!pair "coro_ret_type_error_emitted" + (!type already_seen 2) + (!srcfileloc "cp/coroutines.cc" 97) + nil ) + (!pair "coro_promise_error_emitted" + (!type already_seen 2) + (!srcfileloc "cp/coroutines.cc" 98) + nil ) + (!pair "coro_co_return_error_emitted" + (!type already_seen 2) + (!srcfileloc "cp/coroutines.cc" 99) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 2057 nil gc_used "coroutine_info_hasher" + (!srcfileloc "cp/coroutines.cc" 114) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2058 + (!type pointer 2059 nil gc_used + (!type already_seen 2058) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/coroutines.cc" 114) + (!fields 1 + (!pair "coroutine_info_hasher" + (!type already_seen 2057) + (!srcfileloc "cp/coroutines.cc" 114) + nil ) + ) + ) + + (!type struct 2060 nil gc_unused "proxy_replace"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2061 nil gc_unused "coro_aw_data"nil + (!fields 0 ) + nil 0 
nil nil ) + + (!type struct 2062 nil gc_unused "suspend_point_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2063 nil gc_unused "await_xform_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2064 nil gc_unused "param_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2065 nil gc_unused "local_var_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2066 nil gc_unused "local_vars_transform"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2067 nil gc_unused "susp_frame_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2068 nil gc_unused "coro_interesting_subtree"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2069 nil gc_unused "var_nest_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2070 nil gc_unused "truth_if_transform"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2071 nil gc_unused "param_frame_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2072 nil gc_unused "local_vars_frame_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2073 nil gc_unused "cp_genericize_omp_taskreg"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2074 nil gc_unused "cp_genericize_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2075 nil gc_unused "cp_fold_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2076 nil gc_pointed_to "source_location_table_entry" + (!srcfileloc "cp/cp-gimplify.cc" 3157) + (!fields 3 + (!pair "loc" + (!type already_seen 2) + (!srcfileloc "cp/cp-gimplify.cc" 3154) + nil ) + (!pair "uid" + (!type already_seen 2) + (!srcfileloc "cp/cp-gimplify.cc" 3155) + nil ) + (!pair "var" + (!type already_seen 23) + (!srcfileloc "cp/cp-gimplify.cc" 3156) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 2077 nil gc_used "source_location_table_entry_hash" + (!srcfileloc "cp/cp-gimplify.cc" 3233) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2078 + (!type pointer 2079 nil gc_used + (!type 
already_seen 2078) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/cp-gimplify.cc" 3233) + (!fields 1 + (!pair "source_location_table_entry_hash" + (!type already_seen 2077) + (!srcfileloc "cp/cp-gimplify.cc" 3233) + nil ) + ) + ) + + (!type already_seen 1961) + + (!type struct 2080 nil gc_used "incomplete_var" + (!srcfileloc "cp/decl.cc" 256) + (!fields 2 + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "cp/decl.cc" 251) + nil ) + (!pair "incomplete_type" + (!type already_seen 23) + (!srcfileloc "cp/decl.cc" 252) + nil ) + ) + nil 516 nil nil ) + + (!type user_struct 2081 + (!type pointer 2082 nil gc_used + (!type already_seen 2081) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/decl.cc" 256) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/decl.cc" 256) + nil ) + (!pair "incomplete_var" + (!type already_seen 2080) + (!srcfileloc "cp/decl.cc" 256) + nil ) + ) + ) + + (!type struct 2083 nil gc_unused "cp_switch"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2084 nil gc_unused "typename_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2085 nil gc_used "typename_hasher" + (!srcfileloc "cp/decl.cc" 4043) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2086 + (!type pointer 2087 nil gc_used + (!type already_seen 2086) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/decl.cc" 4043) + (!fields 1 + (!pair "typename_hasher" + (!type already_seen 2085) + (!srcfileloc "cp/decl.cc" 4043) + nil ) + ) + ) + + (!type struct 2088 nil gc_unused "predefined_identifier"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2089 nil gc_unused "reshape_iter"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2090 + (!type pointer 2091 nil gc_unused + (!type already_seen 2090) + ) + gc_unused "priority_info_s"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2092 nil gc_used "mangled_decl_hash" + (!srcfileloc "cp/decl2.cc" 137) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2093 + (!type pointer 2094 
nil gc_used + (!type already_seen 2093) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/decl2.cc" 137) + (!fields 1 + (!pair "mangled_decl_hash" + (!type already_seen 2092) + (!srcfileloc "cp/decl2.cc" 137) + nil ) + ) + ) + + (!type struct 2095 nil gc_used "pending_noexcept" + (!srcfileloc "cp/except.cc" 1111) + (!fields 2 + (!pair "fn" + (!type already_seen 23) + (!srcfileloc "cp/except.cc" 1108) + nil ) + (!pair "loc" + (!type already_seen 2) + (!srcfileloc "cp/except.cc" 1109) + nil ) + ) + nil 516 nil nil ) + + (!type user_struct 2096 + (!type pointer 2097 nil gc_used + (!type already_seen 2096) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/except.cc" 1111) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/except.cc" 1111) + nil ) + (!pair "pending_noexcept" + (!type already_seen 2095) + (!srcfileloc "cp/except.cc" 1111) + nil ) + ) + ) + + (!type struct 2098 nil gc_unused "find_uninit_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2099 nil gc_used "tree_int" + (!srcfileloc "cp/lambda.cc" 1404) + (!fields 2 + (!pair "t" + (!type already_seen 23) + (!srcfileloc "cp/lambda.cc" 1401) + nil ) + (!pair "i" + (!type already_seen 2) + (!srcfileloc "cp/lambda.cc" 1402) + nil ) + ) + nil 516 nil nil ) + + (!type user_struct 2100 + (!type pointer 2101 nil gc_used + (!type already_seen 2100) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/lambda.cc" 1404) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/lambda.cc" 1404) + nil ) + (!pair "tree_int" + (!type already_seen 2099) + (!srcfileloc "cp/lambda.cc" 1404) + nil ) + ) + ) + + (!type struct 2102 nil gc_unused "impl_files"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2103 nil gc_unused "module_token_filter"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2104 nil gc_used "conv_type_hasher" + (!srcfileloc "cp/lex.cc" 813) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2105 + (!type pointer 2106 nil gc_used + (!type already_seen 
2105) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/lex.cc" 813) + (!fields 1 + (!pair "conv_type_hasher" + (!type already_seen 2104) + (!srcfileloc "cp/lex.cc" 813) + nil ) + ) + ) + + (!type struct 2107 nil gc_unused "clause"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2108 nil gc_unused "formula"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2109 nil gc_pointed_to "subsumption_entry" + (!srcfileloc "cp/logic.cc" 725) + (!fields 3 + (!pair "lhs" + (!type already_seen 23) + (!srcfileloc "cp/logic.cc" 722) + nil ) + (!pair "rhs" + (!type already_seen 23) + (!srcfileloc "cp/logic.cc" 723) + nil ) + (!pair "result" + (!type already_seen 2) + (!srcfileloc "cp/logic.cc" 724) + nil ) + ) + (!options + (!option for_user string "") + ) + 516 nil nil ) + + (!type struct 2110 nil gc_used "subsumption_hasher" + (!srcfileloc "cp/logic.cc" 751) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2111 + (!type pointer 2112 nil gc_used + (!type already_seen 2111) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/logic.cc" 751) + (!fields 1 + (!pair "subsumption_hasher" + (!type already_seen 2110) + (!srcfileloc "cp/logic.cc" 751) + nil ) + ) + ) + + (!type struct 2113 nil gc_used "globals" + (!srcfileloc "cp/mangle.cc" 126) + (!fields 6 + (!pair "substitutions" + (!type already_seen 84) + (!srcfileloc "cp/mangle.cc" 107) + nil ) + (!pair "entity" + (!type already_seen 23) + (!srcfileloc "cp/mangle.cc" 110) + (!options + (!option skip string "") + ) + ) + (!pair "parm_depth" + (!type already_seen 2) + (!srcfileloc "cp/mangle.cc" 113) + nil ) + (!pair "need_abi_warning" + (!type already_seen 2) + (!srcfileloc "cp/mangle.cc" 117) + nil ) + (!pair "need_cxx17_warning" + (!type already_seen 2) + (!srcfileloc "cp/mangle.cc" 120) + nil ) + (!pair "mod" + (!type already_seen 2) + (!srcfileloc "cp/mangle.cc" 123) + nil ) + ) + nil 516 nil nil ) + + (!type struct 2114 nil gc_unused "comp_cat_info_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2115 
nil gc_unused "comp_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2116 nil gc_unused "nodel_ptr_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2117 nil gc_unused "simple_hashmap_traits,int>" + (!srcfileloc "cp/module.cc" 331) + (!fields 2 + (!pair "int" + (!type already_seen 373) + (!srcfileloc "cp/module.cc" 331) + nil ) + (!pair "nodel_ptr_hash" + (!srcfileloc "cp/module.cc" 332) + (!fields 3 + (!pair "ptr_int_traits" + (!type already_seen 2117) + (!srcfileloc "cp/module.cc" 332) + nil ) + (!pair "signed" + (!type undefined 2120 nil gc_unused "signed" + (!srcfileloc "cp/module.cc" 332) + ) + (!srcfileloc "cp/module.cc" 332) + nil ) + (!pair "void" + (!type pointer 2121 nil gc_unused + (!type struct 2122 + (!type already_seen 2121) + gc_unused "void"nil + (!fields 0 ) + nil 0 nil nil ) + ) + (!srcfileloc "cp/module.cc" 332) + nil ) + ) + ) + + (!type already_seen 2122) + + (!type already_seen 2120) + + (!type struct 2123 nil gc_unused "data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2124 nil gc_unused "bytes"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 1982) + + (!type already_seen 1983) + + (!type struct 2125 nil gc_unused "elf_out"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2126 nil gc_unused "bytes_out"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2127 nil gc_unused "elf"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2128 nil gc_unused "stat"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2129 nil gc_unused "depset"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2130 nil gc_unused "pending_key" + (!srcfileloc "cp/module.cc" 2691) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2131 nil gc_unused "hash_map>" + (!srcfileloc "cp/module.cc" 2691) + (!fields 2 + (!pair "auto_vec>" + (!srcfileloc "cp/module.cc" 2704) + (!fields 2 + (!pair "auto_vec>" + (!srcfileloc "cp/module.cc" 2846) + (!fields 4 + (!pair "uintptr_t" + (!type already_seen 2) + 
(!srcfileloc "cp/module.cc" 2846) + nil ) + (!pair "simple_hashmap_traits" + (!srcfileloc "cp/module.cc" 3815) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/module.cc" 3815) + nil ) + (!pair "module_state" + (!type already_seen 1975) + (!srcfileloc "cp/module.cc" 3815) + nil ) + ) + ) + + (!type user_struct 2147 + (!type pointer 2148 nil gc_used + (!type already_seen 2147) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/module.cc" 3818) + (!fields 1 + (!pair "module_state_hash" + (!type already_seen 2144) + (!srcfileloc "cp/module.cc" 3818) + nil ) + ) + ) + + (!type user_struct 2149 nil gc_unused "hash_map,unsigned>>" + (!srcfileloc "cp/module.cc" 3824) + (!fields 5 + (!pair "unsigned" + (!type already_seen 1404) + (!srcfileloc "cp/module.cc" 3824) + nil ) + (!pair "0" + (!type undefined 2150 nil gc_unused "0" + (!srcfileloc "cp/module.cc" 3824) + ) + (!srcfileloc "cp/module.cc" 3824) + nil ) + (!pair "simple_hashmap_traits" + (!srcfileloc "cp/module.cc" 4481) + (!fields 1 + (!pair "note_def_cache_hasher" + (!type already_seen 2154) + (!srcfileloc "cp/module.cc" 4481) + nil ) + ) + ) + + (!type struct 2157 nil gc_unused "add_binding_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2158 nil gc_unused "tm"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2159 nil gc_used "macro_export" + (!srcfileloc "cp/module.cc" 16557) + (!fields 2 + (!pair "def" + (!type already_seen 19) + (!srcfileloc "cp/module.cc" 16550) + nil ) + (!pair "undef_loc" + (!type already_seen 2) + (!srcfileloc "cp/module.cc" 16551) + nil ) + ) + nil 516 nil nil ) + + (!type struct 2160 nil gc_unused "macro_import"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2161 + (!type pointer 2162 nil gc_used + (!type already_seen 2161) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/module.cc" 16714) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/module.cc" 16714) + nil ) + (!pair "macro_export" + (!type already_seen 2159) 
+ (!srcfileloc "cp/module.cc" 16714) + nil ) + ) + ) + + (!type struct 2163 nil gc_unused "rlimit"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2164 nil gc_unused "name_lookup"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2165 nil gc_unused "namespace_limit_reached"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2166 nil gc_unused "show_candidate_location"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2167 nil gc_unused "suggest_alternatives"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2168 nil gc_unused "namespace_hints"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2169 nil gc_unused "std_name_hint"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2170 nil gc_unused "missing_std_header"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2171 nil gc_unused "macro_use_before_def"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2172 nil gc_unused "type_id_in_expr_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2173 nil gc_unused "saved_token_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2174 nil gc_unused "cp_parser_binary_operations_map_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2175 nil gc_unused "cp_parser_expression_stack_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2176 nil gc_unused "tentative_firewall"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2177 nil gc_unused "cp_omp_attribute_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2178 nil gc_unused "scope_sentinel"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2179 nil gc_unused "class_decl_loc_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type undefined 2180 nil gc_unused "class_to_loc_map_t::iterator" + (!srcfileloc "cp/parser.cc" 33787) + ) + + (!type struct 2181 + (!type pointer 2182 nil gc_used + (!type already_seen 2181) + ) + gc_pointed_to "pending_template" + (!srcfileloc "cp/pt.cc" 9599) + (!fields 2 + (!pair "next" + (!type already_seen 2182) 
+ (!srcfileloc "cp/pt.cc" 58) + nil ) + (!pair "tinst" + (!type already_seen 1972) + (!srcfileloc "cp/pt.cc" 59) + nil ) + ) + (!options + (!option chain_next string "%h.next") + ) + 516 nil nil ) + + (!type struct 2183 nil gc_used "spec_hasher" + (!srcfileloc "cp/pt.cc" 114) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2184 + (!type pointer 2185 nil gc_used + (!type already_seen 2184) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/pt.cc" 114) + (!fields 1 + (!pair "spec_hasher" + (!type already_seen 2183) + (!srcfileloc "cp/pt.cc" 114) + nil ) + ) + ) + + (!type struct 2186 nil gc_unused "find_parameter_pack_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2187 nil gc_unused "template_parm_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2188 nil gc_unused "uses_all_template_parms_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2189 nil gc_unused "freelist"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2190 nil gc_unused "pair_fn_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2191 nil gc_unused "find_template_parameter_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2192 nil gc_unused "el_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2193 nil gc_unused "auto_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2194 + (!type pointer 2195 nil gc_used + (!type already_seen 2194) + ) + gc_pointed_to "hash_map" + (!srcfileloc "cp/pt.cc" 29962) + (!fields 2 + (!pair "tree_pair_p" + (!type already_seen 538) + (!srcfileloc "cp/pt.cc" 29962) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "cp/pt.cc" 29962) + nil ) + ) + ) + + (!type struct 2196 nil gc_used "tinfo_s" + (!srcfileloc "cp/rtti.cc" 122) + (!fields 3 + (!pair "type" + (!type already_seen 23) + (!srcfileloc "cp/rtti.cc" 65) + nil ) + (!pair "vtable" + (!type already_seen 23) + (!srcfileloc "cp/rtti.cc" 67) + nil ) + (!pair "name" + (!type already_seen 23) + (!srcfileloc "cp/rtti.cc" 70) 
+ nil ) + ) + nil 516 nil nil ) + + (!type user_struct 2197 + (!type pointer 2198 nil gc_used + (!type already_seen 2197) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/rtti.cc" 122) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/rtti.cc" 122) + nil ) + (!pair "tinfo_s" + (!type already_seen 2196) + (!srcfileloc "cp/rtti.cc" 122) + nil ) + ) + ) + + (!type struct 2199 nil gc_used "deferred_access" + (!srcfileloc "cp/semantics.cc" 137) + (!fields 2 + (!pair "deferred_access_checks" + (!type already_seen 77) + (!srcfileloc "cp/semantics.cc" 130) + nil ) + (!pair "deferring_access_checks_kind" + (!type already_seen 2) + (!srcfileloc "cp/semantics.cc" 133) + nil ) + ) + nil 516 nil nil ) + + (!type user_struct 2200 + (!type pointer 2201 nil gc_used + (!type already_seen 2200) + ) + gc_pointed_to "vec" + (!srcfileloc "cp/semantics.cc" 137) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "cp/semantics.cc" 137) + nil ) + (!pair "deferred_access" + (!type already_seen 2199) + (!srcfileloc "cp/semantics.cc" 137) + nil ) + ) + ) + + (!type struct 2202 nil gc_unused "cp_check_omp_declare_reduction_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2203 nil gc_unused "omp_target_walk_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2204 nil gc_unused "cplus_array_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2205 nil gc_used "cplus_array_hasher" + (!srcfileloc "cp/tree.cc" 1034) + (!fields 0 ) + nil 516 nil nil ) + + (!type user_struct 2206 + (!type pointer 2207 nil gc_used + (!type already_seen 2206) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/tree.cc" 1034) + (!fields 1 + (!pair "cplus_array_hasher" + (!type already_seen 2205) + (!srcfileloc "cp/tree.cc" 1034) + nil ) + ) + ) + + (!type struct 2208 nil gc_unused "list_proxy"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2209 nil gc_used "list_hasher" + (!srcfileloc "cp/tree.cc" 2166) + (!fields 0 ) + nil 516 nil nil ) + + 
(!type user_struct 2210 + (!type pointer 2211 nil gc_used + (!type already_seen 2210) + ) + gc_pointed_to "hash_table" + (!srcfileloc "cp/tree.cc" 2166) + (!fields 1 + (!pair "list_hasher" + (!type already_seen 2209) + (!srcfileloc "cp/tree.cc" 2166) + nil ) + ) + ) + + (!type struct 2212 nil gc_unused "bot_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2213 nil gc_unused "replace_placeholders_t"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2214 nil gc_unused "work_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2215 nil gc_unused "vtv_graph_node"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2216 nil gc_unused "Dsymbol"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 62) + + (!type already_seen 635) + + (!type struct 2217 nil gc_unused "ClassDeclaration"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2218 nil gc_unused "EnumDeclaration"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 441) + + (!type struct 2219 nil gc_unused "StructDeclaration"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2220 nil gc_unused "TypeInfoDeclaration"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2221 nil gc_unused "VarDeclaration"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2222 + (!type pointer 2223 nil gc_unused + (!type already_seen 2222) + ) + gc_unused "Expression"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2224 nil gc_unused "ClassReferenceExp"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2225 nil gc_unused "IndexExp"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2226 nil gc_unused "SliceExp"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 443) + + (!type already_seen 448) + + (!type already_seen 533) + + (!type struct 2227 nil gc_unused "TypeFunction"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2228 nil gc_unused "Parameter"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2229 nil gc_unused "BaseClass"nil + (!fields 0 ) + nil 
0 nil nil ) + + (!type struct 2230 nil gc_unused "Scope"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2231 nil gc_unused "Loc"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2232 nil gc_unused "Array" + (!srcfileloc "d/d-tree.h" 46) + (!fields 1 + (!pair "Expression" + (!type already_seen 2223) + (!srcfileloc "d/d-tree.h" 46) + nil ) + ) + ) + + (!type already_seen 436) + + (!type already_seen 450) + + (!type already_seen 446) + + (!type already_seen 654) + + (!type already_seen 445) + + (!type struct 2233 nil gc_unused "builtin_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2234 nil gc_unused "d_option_data"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2235 nil gc_unused "TypeInfoVisitor"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2236 nil gc_unused "TypeInfoDeclVisitor"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2237 nil gc_unused "SpeculativeTypeVisitor"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2238 nil gc_used "module_hasher" + (!srcfileloc "fortran/trans-decl.cc" 5066) + (!fields 0 ) + nil 16 nil nil ) + + (!type user_struct 2239 + (!type pointer 2240 nil gc_used + (!type already_seen 2239) + ) + gc_pointed_to "hash_table" + (!srcfileloc "fortran/trans-decl.cc" 5066) + (!fields 1 + (!pair "module_hasher" + (!type already_seen 2238) + (!srcfileloc "fortran/trans-decl.cc" 5066) + nil ) + ) + ) + + (!type struct 2241 nil gc_pointed_to "module_htab_entry" + (!srcfileloc "fortran/trans.h" 706) + (!fields 3 + (!pair "name" + (!type already_seen 11) + (!srcfileloc "fortran/trans.h" 703) + nil ) + (!pair "namespace_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 704) + nil ) + (!pair "decls" + (!type pointer 2242 nil gc_used + (!type user_struct 2243 + (!type already_seen 2242) + gc_pointed_to "hash_table" + (!srcfileloc "fortran/trans.h" 705) + (!fields 1 + (!pair "module_decl_hasher" + (!type struct 2244 nil gc_used "module_decl_hasher" + (!srcfileloc "fortran/trans.h" 
705) + (!fields 0 ) + nil 16 nil nil ) + (!srcfileloc "fortran/trans.h" 705) + nil ) + ) + ) + ) + (!srcfileloc "fortran/trans.h" 705) + nil ) + ) + (!options + (!option for_user string "") + ) + 16 nil nil ) + + (!type struct 2245 nil gc_used "gfc_intrinsic_map_t" + (!srcfileloc "fortran/trans-intrinsic.cc" 86) + (!fields 19 + (!pair "id" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 52) + nil ) + (!pair "float_built_in" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 56) + nil ) + (!pair "double_built_in" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 57) + nil ) + (!pair "long_double_built_in" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 58) + nil ) + (!pair "complex_float_built_in" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 59) + nil ) + (!pair "complex_double_built_in" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 60) + nil ) + (!pair "complex_long_double_built_in" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 61) + nil ) + (!pair "libm_name" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 66) + nil ) + (!pair "complex_available" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 69) + nil ) + (!pair "is_constant" + (!type already_seen 2) + (!srcfileloc "fortran/trans-intrinsic.cc" 72) + nil ) + (!pair "name" + (!type already_seen 11) + (!srcfileloc "fortran/trans-intrinsic.cc" 75) + nil ) + (!pair "real4_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 78) + nil ) + (!pair "real8_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 79) + nil ) + (!pair "real10_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 80) + nil ) + (!pair "real16_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 81) + nil ) + (!pair "complex4_decl" + (!type already_seen 23) + 
(!srcfileloc "fortran/trans-intrinsic.cc" 82) + nil ) + (!pair "complex8_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 83) + nil ) + (!pair "complex10_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 84) + nil ) + (!pair "complex16_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-intrinsic.cc" 85) + nil ) + ) + nil 16 nil nil ) + + (!type struct 2246 nil gc_used "gfc_st_parameter_field" + (!srcfileloc "fortran/trans-io.cc" 73) + (!fields 6 + (!pair "name" + (!type already_seen 11) + (!srcfileloc "fortran/trans-io.cc" 67) + nil ) + (!pair "mask" + (!type already_seen 2) + (!srcfileloc "fortran/trans-io.cc" 68) + nil ) + (!pair "param_type" + (!type already_seen 2) + (!srcfileloc "fortran/trans-io.cc" 69) + nil ) + (!pair "type" + (!type already_seen 2) + (!srcfileloc "fortran/trans-io.cc" 70) + nil ) + (!pair "field" + (!type already_seen 23) + (!srcfileloc "fortran/trans-io.cc" 71) + nil ) + (!pair "field_len" + (!type already_seen 23) + (!srcfileloc "fortran/trans-io.cc" 72) + nil ) + ) + nil 16 nil nil ) + + (!type struct 2247 nil gc_used "gfc_st_parameter" + (!srcfileloc "fortran/trans-io.cc" 79) + (!fields 2 + (!pair "name" + (!type already_seen 11) + (!srcfileloc "fortran/trans-io.cc" 77) + nil ) + (!pair "type" + (!type already_seen 23) + (!srcfileloc "fortran/trans-io.cc" 78) + nil ) + ) + nil 16 nil nil ) + + (!type struct 2248 nil gc_unused "iter_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2249 nil gc_unused "forall_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2250 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/fortran/trans.h:33"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2251 nil gc_unused "gfc_se"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2252 nil gc_unused "gfc_co_subroutines_args"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2253 nil gc_unused "gfc_array_info"nil + (!fields 0 ) + nil 0 
nil nil ) + + (!type struct 2254 nil gc_unused "gfc_ss_info"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2255 nil gc_unused "gfc_ss"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2256 nil gc_unused "gfc_loopinfo"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2257 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/fortran/trans.h:404"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2258 nil gc_unused "anonymous:/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/gcc/fortran/trans.h:416"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type already_seen 2244) + + (!type already_seen 2243) + + (!type struct 2259 nil gc_unused "gimplify_omp_ctx"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2260 nil gc_used "gfc_powdecl_list" + (!srcfileloc "fortran/trans.h" 911) + (!fields 3 + (!pair "integer" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 908) + nil ) + (!pair "real" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 909) + nil ) + (!pair "cmplx" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 910) + nil ) + ) + nil 16 nil nil ) + + (!type struct 2261 nil gc_unused "gfc_interface_sym_mapping"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2262 nil gc_unused "gfc_interface_mapping"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2263 nil gc_unused "go_create_gogo_args"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2264 nil gc_unused "Linemap"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2265 nil gc_unused "Backend"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2266 nil gc_unused "ggc_root_tab"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2267 nil gc_unused "lto_file"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2268 nil gc_unused "lto_section_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2269 nil gc_unused "lto_section_slot"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2270 nil gc_unused 
"tree_scc"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2271 nil gc_unused "tree_scc_hasher"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2272 nil gc_unused "streamer_tree_cache_d"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type user_struct 2273 nil gc_unused "int_hash" + (!srcfileloc "lto/lto-common.cc" 1799) + (!fields 3 + (!pair "UINT_MAX" + (!type undefined 2274 nil gc_unused "UINT_MAX" + (!srcfileloc "lto/lto-common.cc" 1799) + ) + (!srcfileloc "lto/lto-common.cc" 1799) + nil ) + (!pair "0" + (!type already_seen 2150) + (!srcfileloc "lto/lto-common.cc" 1799) + nil ) + (!pair "unsigned" + (!type already_seen 1404) + (!srcfileloc "lto/lto-common.cc" 1799) + nil ) + ) + ) + + (!type already_seen 2274) + + (!type struct 2275 nil gc_unused "file_data_list"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2276 nil gc_unused "symbol_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2277 nil gc_unused "variable_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2278 nil gc_unused "function_entry"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2279 + (!type pointer 2280 nil gc_used + (!type already_seen 2279) + ) + gc_pointed_to "objc_map_private" + (!srcfileloc "objc/objc-map.h" 79) + (!fields 7 + (!pair "number_of_slots" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 54) + nil ) + (!pair "mask" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 57) + nil ) + (!pair "number_of_non_empty_slots" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 62) + nil ) + (!pair "max_number_of_non_empty_slots" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 68) + nil ) + (!pair "maximum_load_factor" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 71) + nil ) + (!pair "slots" + (!type already_seen 24) + (!srcfileloc "objc/objc-map.h" 74) + (!options + (!option length string "%h.number_of_slots") + ) + ) + (!pair "values" + (!type already_seen 24) + (!srcfileloc "objc/objc-map.h" 78) + 
(!options + (!option length string "%h.number_of_slots") + ) + ) + ) + nil 768 nil nil ) + + (!type struct 2281 + (!type pointer 2282 + (!type pointer 2283 nil gc_used + (!type already_seen 2282) + ) + gc_pointed_to + (!type already_seen 2281) + ) + gc_pointed_to "hashed_entry" + (!srcfileloc "objc/objc-act.h" 285) + (!fields 3 + (!pair "list" + (!type pointer 2284 nil gc_used + (!type struct 2285 + (!type already_seen 2284) + gc_pointed_to "hashed_attribute" + (!srcfileloc "objc/objc-act.h" 279) + (!fields 2 + (!pair "next" + (!type already_seen 2284) + (!srcfileloc "objc/objc-act.h" 277) + nil ) + (!pair "value" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.h" 278) + nil ) + ) + nil 768 nil nil ) + ) + (!srcfileloc "objc/objc-act.h" 282) + nil ) + (!pair "next" + (!type already_seen 2282) + (!srcfileloc "objc/objc-act.h" 283) + nil ) + (!pair "key" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.h" 284) + nil ) + ) + nil 768 nil nil ) + + (!type already_seen 2285) + + (!type struct 2286 + (!type pointer 2287 nil gc_used + (!type already_seen 2286) + ) + gc_pointed_to "imp_entry" + (!srcfileloc "objc/objc-act.h" 302) + (!fields 6 + (!pair "next" + (!type already_seen 2287) + (!srcfileloc "objc/objc-act.h" 296) + nil ) + (!pair "imp_context" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.h" 297) + nil ) + (!pair "imp_template" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.h" 298) + nil ) + (!pair "class_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.h" 299) + nil ) + (!pair "meta_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.h" 300) + nil ) + (!pair "has_cxx_cdtors" + (!type already_seen 2) + (!srcfileloc "objc/objc-act.h" 301) + nil ) + ) + nil 768 nil nil ) + + (!type struct 2288 nil gc_unused "objc_try_context"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2289 nil gc_pointed_to "string_descriptor" + (!srcfileloc "objc/objc-act.cc" 252) + (!fields 2 + (!pair "literal" + (!type 
already_seen 23) + (!srcfileloc "objc/objc-act.cc" 248) + nil ) + (!pair "constructor" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.cc" 251) + nil ) + ) + (!options + (!option for_user string "") + ) + 768 nil nil ) + + (!type struct 2290 nil gc_used "objc_string_hasher" + (!srcfileloc "objc/objc-act.cc" 260) + (!fields 0 ) + nil 768 nil nil ) + + (!type user_struct 2291 + (!type pointer 2292 nil gc_used + (!type already_seen 2291) + ) + gc_pointed_to "hash_table" + (!srcfileloc "objc/objc-act.cc" 260) + (!fields 1 + (!pair "objc_string_hasher" + (!type already_seen 2290) + (!srcfileloc "objc/objc-act.cc" 260) + nil ) + ) + ) + + (!type struct 2293 nil gc_unused "decl_name_hash"nil + (!fields 0 ) + nil 0 nil nil ) + + (!type struct 2294 nil gc_used "ident_data_tuple" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + (!fields 2 + (!pair "ident" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1067) + nil ) + (!pair "data" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1068) + nil ) + ) + nil 768 nil nil ) + + (!type user_struct 2295 + (!type pointer 2296 nil gc_used + (!type already_seen 2295) + ) + gc_pointed_to "vec" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + nil ) + (!pair "ident_data_tuple" + (!type already_seen 2294) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + nil ) + ) + ) + + (!type struct 2297 nil gc_used "msgref_entry" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + (!fields 3 + (!pair "func" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1253) + nil ) + (!pair "selname" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1254) + nil ) + (!pair "refdecl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1255) + nil ) + ) + nil 768 nil nil ) + + (!type 
user_struct 2298 + (!type pointer 2299 nil gc_used + (!type already_seen 2298) + ) + gc_pointed_to "vec" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + nil ) + (!pair "msgref_entry" + (!type already_seen 2297) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + nil ) + ) + ) + + (!type struct 2300 nil gc_used "prot_list_entry" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + (!fields 2 + (!pair "id" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1309) + nil ) + (!pair "refdecl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1310) + nil ) + ) + nil 768 nil nil ) + + (!type user_struct 2301 + (!type pointer 2302 nil gc_used + (!type already_seen 2301) + ) + gc_pointed_to "vec" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + nil ) + (!pair "prot_list_entry" + (!type already_seen 2300) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + nil ) + ) + ) + + (!type struct 2303 nil gc_used "ivarref_entry" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + (!fields 2 + (!pair "decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2991) + nil ) + (!pair "offset" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2992) + nil ) + ) + nil 768 nil nil ) + + (!type user_struct 2304 + (!type pointer 2305 nil gc_used + (!type already_seen 2304) + ) + gc_pointed_to "vec" + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + (!fields 2 + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + nil ) + (!pair "ivarref_entry" + (!type already_seen 2303) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + nil ) + ) + ) +) +(!typedefs 918 + (!pair 
"ivarref_entry" + (!type already_seen 2303) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + nil ) + (!pair "vec" + (!type already_seen 2304) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + nil ) + (!pair "prot_list_entry" + (!type already_seen 2300) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + nil ) + (!pair "vec" + (!type already_seen 2301) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + nil ) + (!pair "msgref_entry" + (!type already_seen 2297) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + nil ) + (!pair "vec" + (!type already_seen 2298) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + nil ) + (!pair "ident_data_tuple" + (!type already_seen 2294) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + nil ) + (!pair "vec" + (!type already_seen 2295) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + nil ) + (!pair "objc_string_hasher" + (!type already_seen 2290) + (!srcfileloc "objc/objc-act.cc" 260) + nil ) + (!pair "hash_table" + (!type already_seen 2291) + (!srcfileloc "objc/objc-act.cc" 260) + nil ) + (!pair "attr" + (!type already_seen 2284) + (!srcfileloc "objc/objc-act.h" 274) + nil ) + (!pair "hash" + (!type already_seen 2282) + (!srcfileloc "objc/objc-act.h" 273) + nil ) + (!pair "objc_map_iterator_t" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 241) + nil ) + (!pair "objc_map_t" + (!type already_seen 2280) + (!srcfileloc "objc/objc-map.h" 90) + nil ) + (!pair "objc_map_private_hash_t" + (!type already_seen 2) + (!srcfileloc "objc/objc-map.h" 40) + nil ) + (!pair "code_id_hash" + (!type already_seen 2273) + (!srcfileloc "lto/lto-common.cc" 1799) + nil ) + (!pair "UINT_MAX" + (!type already_seen 2274) + (!srcfileloc "lto/lto-common.cc" 1799) + nil ) + (!pair "int_hash" + (!type already_seen 2273) + (!srcfileloc "lto/lto-common.cc" 1799) + nil ) + (!pair "go_char_p" + (!type already_seen 9) + (!srcfileloc "go/go-lang.cc" 199) + nil ) + (!pair "gfc_interface_mapping" + 
(!type already_seen 2262) + (!srcfileloc "fortran/trans.h" 1161) + nil ) + (!pair "gfc_interface_sym_mapping" + (!type already_seen 2261) + (!srcfileloc "fortran/trans.h" 1146) + nil ) + (!pair "gfc_powdecl_list" + (!type already_seen 2260) + (!srcfileloc "fortran/trans.h" 912) + nil ) + (!pair "module_decl_hasher" + (!type already_seen 2244) + (!srcfileloc "fortran/trans.h" 705) + nil ) + (!pair "hash_table" + (!type already_seen 2243) + (!srcfileloc "fortran/trans.h" 705) + nil ) + (!pair "gfc_wrapped_block" + (!type already_seen 2258) + (!srcfileloc "fortran/trans.h" 421) + nil ) + (!pair "gfc_saved_var" + (!type already_seen 2257) + (!srcfileloc "fortran/trans.h" 408) + nil ) + (!pair "gfc_loopinfo" + (!type already_seen 2256) + (!srcfileloc "fortran/trans.h" 398) + nil ) + (!pair "gfc_ss" + (!type already_seen 2255) + (!srcfileloc "fortran/trans.h" 349) + nil ) + (!pair "gfc_ss_info" + (!type already_seen 2254) + (!srcfileloc "fortran/trans.h" 308) + nil ) + (!pair "gfc_array_info" + (!type already_seen 2253) + (!srcfileloc "fortran/trans.h" 209) + nil ) + (!pair "gfc_co_subroutines_args" + (!type already_seen 2252) + (!srcfileloc "fortran/trans.h" 120) + nil ) + (!pair "gfc_se" + (!type already_seen 2251) + (!srcfileloc "fortran/trans.h" 111) + nil ) + (!pair "stmtblock_t" + (!type already_seen 2250) + (!srcfileloc "fortran/trans.h" 37) + nil ) + (!pair "forall_info" + (!type already_seen 2249) + (!srcfileloc "fortran/trans-stmt.cc" 58) + nil ) + (!pair "iter_info" + (!type already_seen 2248) + (!srcfileloc "fortran/trans-stmt.cc" 46) + nil ) + (!pair "gfc_st_parameter" + (!type already_seen 2247) + (!srcfileloc "fortran/trans-io.cc" 80) + nil ) + (!pair "gfc_st_parameter_field" + (!type already_seen 2246) + (!srcfileloc "fortran/trans-io.cc" 74) + nil ) + (!pair "gfc_intrinsic_map_t" + (!type already_seen 2245) + (!srcfileloc "fortran/trans-intrinsic.cc" 87) + nil ) + (!pair "module_hasher" + (!type already_seen 2238) + (!srcfileloc "fortran/trans-decl.cc" 
5066) + nil ) + (!pair "hash_table" + (!type already_seen 2239) + (!srcfileloc "fortran/trans-decl.cc" 5066) + nil ) + (!pair "tree_frame_info" + (!type already_seen 654) + (!srcfileloc "d/d-tree.h" 345) + nil ) + (!pair "lang_identifier" + (!type already_seen 628) + (!srcfileloc "d/d-tree.h" 344) + nil ) + (!pair "Type" + (!type already_seen 533) + (!srcfileloc "d/d-tree.h" 318) + nil ) + (!pair "d_label_entry" + (!type already_seen 446) + (!srcfileloc "d/d-tree.h" 250) + nil ) + (!pair "hash_map" + (!type already_seen 445) + (!srcfileloc "d/d-tree.h" 250) + nil ) + (!pair "Module" + (!type already_seen 443) + (!srcfileloc "d/d-tree.h" 237) + nil ) + (!pair "FuncDeclaration" + (!type already_seen 441) + (!srcfileloc "d/d-tree.h" 236) + nil ) + (!pair "AggregateDeclaration" + (!type already_seen 635) + (!srcfileloc "d/d-tree.h" 213) + nil ) + (!pair "Declaration" + (!type already_seen 62) + (!srcfileloc "d/d-tree.h" 212) + nil ) + (!pair "Statement" + (!type already_seen 448) + (!srcfileloc "d/d-tree.h" 138) + nil ) + (!pair "d_label_use_entry" + (!type already_seen 450) + (!srcfileloc "d/d-tree.h" 135) + nil ) + (!pair "binding_level" + (!type already_seen 436) + (!srcfileloc "d/d-tree.h" 120) + nil ) + (!pair "Expressions" + (!type already_seen 2232) + (!srcfileloc "d/d-tree.h" 46) + nil ) + (!pair "Array" + (!type already_seen 2232) + (!srcfileloc "d/d-tree.h" 46) + nil ) + (!pair "list_hasher" + (!type already_seen 2209) + (!srcfileloc "cp/tree.cc" 2166) + nil ) + (!pair "hash_table" + (!type already_seen 2210) + (!srcfileloc "cp/tree.cc" 2166) + nil ) + (!pair "cplus_array_hasher" + (!type already_seen 2205) + (!srcfileloc "cp/tree.cc" 1034) + nil ) + (!pair "hash_table" + (!type already_seen 2206) + (!srcfileloc "cp/tree.cc" 1034) + nil ) + (!pair "deferred_access" + (!type already_seen 2199) + (!srcfileloc "cp/semantics.cc" 137) + nil ) + (!pair "vec" + (!type already_seen 2200) + (!srcfileloc "cp/semantics.cc" 137) + nil ) + (!pair "tinfo_s" + (!type 
already_seen 2196) + (!srcfileloc "cp/rtti.cc" 122) + nil ) + (!pair "vec" + (!type already_seen 2197) + (!srcfileloc "cp/rtti.cc" 122) + nil ) + (!pair "hash_map" + (!type already_seen 2194) + (!srcfileloc "cp/pt.cc" 29962) + nil ) + (!pair "pending_template" + (!type already_seen 2181) + (!srcfileloc "cp/pt.cc" 9599) + nil ) + (!pair "tinst_level" + (!type already_seen 1971) + (!srcfileloc "cp/pt.cc" 9590) + nil ) + (!pair "spec_hash_table" + (!type already_seen 2184) + (!srcfileloc "cp/pt.cc" 114) + nil ) + (!pair "spec_hasher" + (!type already_seen 2183) + (!srcfileloc "cp/pt.cc" 114) + nil ) + (!pair "hash_table" + (!type already_seen 2184) + (!srcfileloc "cp/pt.cc" 114) + nil ) + (!pair "tree_fn_t" + (!type already_seen 2) + (!srcfileloc "cp/pt.cc" 51) + nil ) + (!pair "cp_parser" + (!type already_seen 2015) + (!srcfileloc "cp/parser.cc" 47576) + nil ) + (!pair "iter_t" + (!type already_seen 2180) + (!srcfileloc "cp/parser.cc" 33787) + nil ) + (!pair "class_to_loc_map_t::iterator" + (!type already_seen 2180) + (!srcfileloc "cp/parser.cc" 33787) + nil ) + (!pair "cp_parser_expression_stack" + (!type array 2306 nil gc_unused "NUM_PREC_VALUES" + (!type already_seen 2175) + ) + (!srcfileloc "cp/parser.cc" 1999) + nil ) + (!pair "cp_parser_flags" + (!type already_seen 2) + (!srcfileloc "cp/parser.cc" 1930) + nil ) + (!pair "vec" + (!type already_seen 2161) + (!srcfileloc "cp/module.cc" 16714) + nil ) + (!pair "macro_export" + (!type already_seen 2159) + (!srcfileloc "cp/module.cc" 16553) + nil ) + (!pair "note_defs_table_t" + (!type already_seen 2155) + (!srcfileloc "cp/module.cc" 4481) + nil ) + (!pair "note_def_cache_hasher" + (!type already_seen 2154) + (!srcfileloc "cp/module.cc" 4481) + nil ) + (!pair "hash_table" + (!type already_seen 2155) + (!srcfileloc "cp/module.cc" 4481) + nil ) + (!pair "entity_map_t" + (!type already_seen 2149) + (!srcfileloc "cp/module.cc" 3824) + nil ) + (!pair "0" + (!type already_seen 2150) + (!srcfileloc "cp/module.cc" 3824) + 
nil ) + (!pair "int_hash,unsigned>>" + (!type already_seen 2149) + (!srcfileloc "cp/module.cc" 3824) + nil ) + (!pair "module_state_hash" + (!type already_seen 2144) + (!srcfileloc "cp/module.cc" 3818) + nil ) + (!pair "hash_table" + (!type already_seen 2147) + (!srcfileloc "cp/module.cc" 3818) + nil ) + (!pair "vec" + (!type already_seen 2145) + (!srcfileloc "cp/module.cc" 3815) + nil ) + (!pair "location_map_info" + (!type already_seen 2142) + (!srcfileloc "cp/module.cc" 3603) + nil ) + (!pair "module_state" + (!type already_seen 1974) + (!srcfileloc "cp/module.cc" 3397) + nil ) + (!pair "slurping" + (!type already_seen 1977) + (!srcfileloc "cp/module.cc" 3286) + nil ) + (!pair "bytes_in" + (!type already_seen 1983) + (!srcfileloc "cp/module.cc" 3275) + nil ) + (!pair "elf_in" + (!type already_seen 1982) + (!srcfileloc "cp/module.cc" 3264) + nil ) + (!pair "vl_embed" + (!type already_seen 1980) + (!srcfileloc "cp/module.cc" 3261) + nil ) + (!pair "vec" + (!type already_seen 1979) + (!srcfileloc "cp/module.cc" 3261) + nil ) + (!pair "loc_range_t" + (!type already_seen 1985) + (!srcfileloc "cp/module.cc" 3166) + nil ) + (!pair "std::pair" + (!type already_seen 1985) + (!srcfileloc "cp/module.cc" 3166) + nil ) + (!pair "range_t" + (!type already_seen 1984) + (!srcfileloc "cp/module.cc" 3163) + nil ) + (!pair "std::pair" + (!type already_seen 1984) + (!srcfileloc "cp/module.cc" 3163) + nil ) + (!pair "duplicate_hash_map" + (!type already_seen 2137) + (!srcfileloc "cp/module.cc" 2847) + nil ) + (!pair "duplicate_hash" + (!type already_seen 2136) + (!srcfileloc "cp/module.cc" 2846) + nil ) + (!pair "simple_hashmap_traits>" + (!type already_seen 2137) + (!srcfileloc "cp/module.cc" 2846) + nil ) + (!pair "attached_map_t" + (!type already_seen 2133) + (!srcfileloc "cp/module.cc" 2704) + nil ) + (!pair "auto_vec>" + (!type already_seen 2133) + (!srcfileloc "cp/module.cc" 2704) + nil ) + (!pair "pending_map_t" + (!type already_seen 2131) + (!srcfileloc "cp/module.cc" 2691) 
+ nil ) + (!pair "auto_vec>" + (!type already_seen 2131) + (!srcfileloc "cp/module.cc" 2691) + nil ) + (!pair "ptr_int_hash_map" + (!type already_seen 2119) + (!srcfileloc "cp/module.cc" 332) + nil ) + (!pair "signed" + (!type already_seen 2120) + (!srcfileloc "cp/module.cc" 332) + nil ) + (!pair "hash_map" + (!type already_seen 2119) + (!srcfileloc "cp/module.cc" 332) + nil ) + (!pair "ptr_int_traits" + (!type already_seen 2117) + (!srcfileloc "cp/module.cc" 331) + nil ) + (!pair "nodel_ptr_hash,int>" + (!type already_seen 2117) + (!srcfileloc "cp/module.cc" 331) + nil ) + (!pair "verstr_t" + (!type array 2307 nil gc_unused "32" + (!type already_seen 8) + ) + (!srcfileloc "cp/module.cc" 299) + nil ) + (!pair "substitution_identifier_index_t" + (!type already_seen 2) + (!srcfileloc "cp/mangle.cc" 151) + nil ) + (!pair "globals" + (!type already_seen 2113) + (!srcfileloc "cp/mangle.cc" 126) + nil ) + (!pair "subsumption_hasher" + (!type already_seen 2110) + (!srcfileloc "cp/logic.cc" 751) + nil ) + (!pair "hash_table" + (!type already_seen 2111) + (!srcfileloc "cp/logic.cc" 751) + nil ) + (!pair "conv_type_hasher" + (!type already_seen 2104) + (!srcfileloc "cp/lex.cc" 813) + nil ) + (!pair "hash_table" + (!type already_seen 2105) + (!srcfileloc "cp/lex.cc" 813) + nil ) + (!pair "tree_int" + (!type already_seen 2099) + (!srcfileloc "cp/lambda.cc" 1404) + nil ) + (!pair "vec" + (!type already_seen 2100) + (!srcfileloc "cp/lambda.cc" 1404) + nil ) + (!pair "pending_noexcept" + (!type already_seen 2095) + (!srcfileloc "cp/except.cc" 1111) + nil ) + (!pair "vec" + (!type already_seen 2096) + (!srcfileloc "cp/except.cc" 1111) + nil ) + (!pair "mangled_decl_hash" + (!type already_seen 2092) + (!srcfileloc "cp/decl2.cc" 137) + nil ) + (!pair "hash_table" + (!type already_seen 2093) + (!srcfileloc "cp/decl2.cc" 137) + nil ) + (!pair "priority_info" + (!type already_seen 2091) + (!srcfileloc "cp/decl2.cc" 67) + nil ) + (!pair "typename_hasher" + (!type already_seen 2085) + 
(!srcfileloc "cp/decl.cc" 4043) + nil ) + (!pair "hash_table" + (!type already_seen 2086) + (!srcfileloc "cp/decl.cc" 4043) + nil ) + (!pair "incomplete_var" + (!type already_seen 2080) + (!srcfileloc "cp/decl.cc" 256) + nil ) + (!pair "vec" + (!type already_seen 2081) + (!srcfileloc "cp/decl.cc" 256) + nil ) + (!pair "named_label_use_entry" + (!type already_seen 1961) + (!srcfileloc "cp/decl.cc" 217) + nil ) + (!pair "named_label_entry" + (!type already_seen 1958) + (!srcfileloc "cp/decl.cc" 200) + nil ) + (!pair "source_location_table_entry_hash" + (!type already_seen 2077) + (!srcfileloc "cp/cp-gimplify.cc" 3233) + nil ) + (!pair "hash_table" + (!type already_seen 2078) + (!srcfileloc "cp/cp-gimplify.cc" 3233) + nil ) + (!pair "coroutine_info_hasher" + (!type already_seen 2057) + (!srcfileloc "cp/coroutines.cc" 114) + nil ) + (!pair "hash_table" + (!type already_seen 2058) + (!srcfileloc "cp/coroutines.cc" 114) + nil ) + (!pair "sat_hasher" + (!type already_seen 2052) + (!srcfileloc "cp/constraint.cc" 2530) + nil ) + (!pair "hash_table" + (!type already_seen 2053) + (!srcfileloc "cp/constraint.cc" 2530) + nil ) + (!pair "atom_hasher" + (!type already_seen 1991) + (!srcfileloc "cp/constraint.cc" 745) + nil ) + (!pair "hash_table" + (!type already_seen 2049) + (!srcfileloc "cp/constraint.cc" 745) + nil ) + (!pair "constexpr_call_hasher" + (!type already_seen 2038) + (!srcfileloc "cp/constexpr.cc" 1212) + nil ) + (!pair "hash_table" + (!type already_seen 2041) + (!srcfileloc "cp/constexpr.cc" 1212) + nil ) + (!pair "constexpr_fundef" + (!type already_seen 1992) + (!srcfileloc "cp/constexpr.cc" 1058) + nil ) + (!pair "constexpr_fundef_hasher" + (!type already_seen 2034) + (!srcfileloc "cp/constexpr.cc" 151) + nil ) + (!pair "hash_table" + (!type already_seen 2035) + (!srcfileloc "cp/constexpr.cc" 151) + nil ) + (!pair "hash_map" + (!type already_seen 2031) + (!srcfileloc "cp/class.cc" 3392) + nil ) + (!pair "subobject_offset_fn" + (!type already_seen 2) + 
(!srcfileloc "cp/class.cc" 104) + nil ) + (!pair "class_stack_node_t" + (!type already_seen 2026) + (!srcfileloc "cp/class.cc" 70) + nil ) + (!pair "cp_oacc_routine_data" + (!type already_seen 2011) + (!srcfileloc "cp/parser.h" 393) + nil ) + (!pair "cp_omp_declare_simd_data" + (!type already_seen 2009) + (!srcfileloc "cp/parser.h" 389) + nil ) + (!pair "cp_unparsed_functions_entry" + (!type already_seen 2006) + (!srcfileloc "cp/parser.h" 377) + nil ) + (!pair "vec" + (!type already_seen 2013) + (!srcfileloc "cp/parser.h" 377) + nil ) + (!pair "cp_parser_context" + (!type already_seen 2007) + (!srcfileloc "cp/parser.h" 267) + nil ) + (!pair "cp_lexer" + (!type already_seen 2000) + (!srcfileloc "cp/parser.h" 233) + nil ) + (!pair "cp_default_arg_entry" + (!type already_seen 2003) + (!srcfileloc "cp/parser.h" 169) + nil ) + (!pair "vec" + (!type already_seen 2004) + (!srcfileloc "cp/parser.h" 169) + nil ) + (!pair "cp_token_cache_ptr" + (!type already_seen 70) + (!srcfileloc "cp/parser.h" 141) + nil ) + (!pair "cp_token_cache" + (!type already_seen 71) + (!srcfileloc "cp/parser.h" 141) + nil ) + (!pair "vec" + (!type already_seen 1999) + (!srcfileloc "cp/parser.h" 101) + nil ) + (!pair "cp_token" + (!type already_seen 73) + (!srcfileloc "cp/parser.h" 87) + nil ) + (!pair "vec" + (!type already_seen 1997) + (!srcfileloc "cp/parser.h" 87) + nil ) + (!pair "cp_token_position" + (!type already_seen 72) + (!srcfileloc "cp/parser.h" 77) + nil ) + (!pair "cp_parameter_declarator" + (!type already_seen 1970) + (!srcfileloc "cp/cp-tree.h" 6300) + nil ) + (!pair "cp_declarator" + (!type already_seen 1969) + (!srcfileloc "cp/cp-tree.h" 6298) + nil ) + (!pair "cp_virt_specifiers" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6164) + nil ) + (!pair "cp_cv_quals" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 6148) + nil ) + (!pair "ovl_op_info_t" + (!type already_seen 1967) + (!srcfileloc "cp/cp-tree.h" 6121) + nil ) + (!pair "base_access" + (!type already_seen 
2) + (!srcfileloc "cp/cp-tree.h" 5591) + nil ) + (!pair "tsubst_flags_t" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 5577) + nil ) + (!pair "cp_lvalue_kind" + (!type already_seen 2) + (!srcfileloc "cp/cp-tree.h" 5454) + nil ) + (!pair "named_decl_hash" + (!type already_seen 93) + (!srcfileloc "cp/cp-tree.h" 2961) + nil ) + (!pair "hash_table" + (!type already_seen 92) + (!srcfileloc "cp/cp-tree.h" 2961) + nil ) + (!pair "vec" + (!type already_seen 536) + (!srcfileloc "cp/cp-tree.h" 2378) + nil ) + (!pair "tree_pair_p" + (!type already_seen 538) + (!srcfileloc "cp/cp-tree.h" 2291) + nil ) + (!pair "tree_pair_s" + (!type already_seen 537) + (!srcfileloc "cp/cp-tree.h" 2291) + nil ) + (!pair "named_label_hash" + (!type already_seen 457) + (!srcfileloc "cp/cp-tree.h" 2081) + nil ) + (!pair "hash_table" + (!type already_seen 456) + (!srcfileloc "cp/cp-tree.h" 2081) + nil ) + (!pair "omp_declare_target_attr" + (!type already_seen 1946) + (!srcfileloc "cp/cp-tree.h" 1865) + nil ) + (!pair "vec" + (!type already_seen 1949) + (!srcfileloc "cp/cp-tree.h" 1865) + nil ) + (!pair "cxx_saved_binding" + (!type already_seen 1939) + (!srcfileloc "cp/cp-tree.h" 1822) + nil ) + (!pair "vec" + (!type already_seen 1947) + (!srcfileloc "cp/cp-tree.h" 1822) + nil ) + (!pair "deferred_access_check" + (!type already_seen 80) + (!srcfileloc "cp/cp-tree.h" 1560) + nil ) + (!pair "vec" + (!type already_seen 78) + (!srcfileloc "cp/cp-tree.h" 1560) + nil ) + (!pair "ptrmem_cst_t" + (!type already_seen 658) + (!srcfileloc "cp/cp-tree.h" 710) + nil ) + (!pair "cp_class_binding" + (!type already_seen 88) + (!srcfileloc "cp/name-lookup.h" 257) + nil ) + (!pair "vec" + (!type already_seen 87) + (!srcfileloc "cp/name-lookup.h" 257) + nil ) + (!pair "binding_cluster" + (!type already_seen 662) + (!srcfileloc "cp/name-lookup.h" 148) + nil ) + (!pair "binding_index" + (!type already_seen 664) + (!srcfileloc "cp/name-lookup.h" 129) + nil ) + (!pair "binding_slot" + (!type already_seen 666) + 
(!srcfileloc "cp/name-lookup.h" 87) + nil ) + (!pair "cp_binding_level" + (!type already_seen 83) + (!srcfileloc "cp/name-lookup.h" 54) + nil ) + (!pair "cxx_binding" + (!type already_seen 90) + (!srcfileloc "cp/name-lookup.h" 48) + nil ) + (!pair "matching_braces" + (!type already_seen 1934) + (!srcfileloc "c/c-parser.cc" 1123) + nil ) + (!pair "matching_brace_traits" + (!type already_seen 1933) + (!srcfileloc "c/c-parser.cc" 1123) + nil ) + (!pair "token_pair" + (!type already_seen 1934) + (!srcfileloc "c/c-parser.cc" 1123) + nil ) + (!pair "matching_parens" + (!type already_seen 1932) + (!srcfileloc "c/c-parser.cc" 1105) + nil ) + (!pair "matching_paren_traits" + (!type already_seen 1931) + (!srcfileloc "c/c-parser.cc" 1105) + nil ) + (!pair "token_pair" + (!type already_seen 1932) + (!srcfileloc "c/c-parser.cc" 1105) + nil ) + (!pair "c_parser" + (!type already_seen 1925) + (!srcfileloc "c/c-parser.cc" 267) + nil ) + (!pair "vec" + (!type already_seen 1929) + (!srcfileloc "c/c-parser.cc" 183) + nil ) + (!pair "c_token" + (!type already_seen 1923) + (!srcfileloc "c/c-parser.cc" 175) + nil ) + (!pair "gcc_options" + (!type already_seen 833) + (!srcfileloc "c-family/c-pragma.cc" 1051) + nil ) + (!pair "pending_redefinition" + (!type already_seen 1905) + (!srcfileloc "c-family/c-pragma.cc" 496) + nil ) + (!pair "vec" + (!type already_seen 1906) + (!srcfileloc "c-family/c-pragma.cc" 496) + nil ) + (!pair "pending_weak" + (!type already_seen 1902) + (!srcfileloc "c-family/c-pragma.cc" 248) + nil ) + (!pair "vec" + (!type already_seen 1903) + (!srcfileloc "c-family/c-pragma.cc" 248) + nil ) + (!pair "pragma_handler_2arg" + (!type already_seen 2) + (!srcfileloc "c-family/c-pragma.h" 212) + nil ) + (!pair "pragma_handler_1arg" + (!type already_seen 2) + (!srcfileloc "c-family/c-pragma.h" 209) + nil ) + (!pair "omp_clause_mask" + (!type already_seen 1894) + (!srcfileloc "c-family/c-common.h" 1193) + nil ) + (!pair "wide_int_bitmask" + (!type already_seen 1894) + 
(!srcfileloc "c-family/c-common.h" 1193) + nil ) + (!pair "bc_state_t" + (!type already_seen 1893) + (!srcfileloc "c-family/c-common.h" 1149) + nil ) + (!pair "stmt_tree" + (!type already_seen 454) + (!srcfileloc "c-family/c-common.h" 588) + nil ) + (!pair "added_includes_t" + (!type already_seen 1887) + (!srcfileloc "c-family/c-common.cc" 9226) + nil ) + (!pair "hash_map" + (!type already_seen 1887) + (!srcfileloc "c-family/c-common.cc" 9226) + nil ) + (!pair "per_file_includes_t" + (!type already_seen 1885) + (!srcfileloc "c-family/c-common.cc" 9221) + nil ) + (!pair "nofree_string_hash" + (!type already_seen 1886) + (!srcfileloc "c-family/c-common.cc" 9221) + nil ) + (!pair "hash_set" + (!type already_seen 1885) + (!srcfileloc "c-family/c-common.cc" 9221) + nil ) + (!pair "vec" + (!type already_seen 1883) + (!srcfileloc "c-family/c-common.cc" 8250) + nil ) + (!pair "tree_gc_vec" + (!type already_seen 84) + (!srcfileloc "c-family/c-common.cc" 8249) + nil ) + (!pair "vec" + (!type already_seen 1881) + (!srcfileloc "c-family/c-common.cc" 5851) + nil ) + (!pair "const_char_p" + (!type already_seen 9) + (!srcfileloc "c-family/c-common.cc" 5850) + nil ) + (!pair "c_binding_ptr" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 581) + nil ) + (!pair "vec" + (!type already_seen 648) + (!srcfileloc "c/c-decl.cc" 389) + nil ) + (!pair "c_goto_bindings_p" + (!type already_seen 649) + (!srcfileloc "c/c-decl.cc" 370) + nil ) + (!pair "c_expr_t" + (!type already_seen 1864) + (!srcfileloc "c/c-tree.h" 175) + nil ) + (!pair "builtin_type" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/utils.cc" 6146) + nil ) + (!pair "pad_type_hasher" + (!type already_seen 1859) + (!srcfileloc "ada/gcc-interface/utils.cc" 327) + nil ) + (!pair "hash_table" + (!type already_seen 1860) + (!srcfileloc "ada/gcc-interface/utils.cc" 327) + nil ) + (!pair "packable_type_hasher" + (!type already_seen 1855) + (!srcfileloc "ada/gcc-interface/utils.cc" 304) + nil ) + (!pair "hash_table" 
+ (!type already_seen 1856) + (!srcfileloc "ada/gcc-interface/utils.cc" 304) + nil ) + (!pair "atomic_acces_t" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 4169) + nil ) + (!pair "vec" + (!type already_seen 1849) + (!srcfileloc "ada/gcc-interface/trans.cc" 214) + nil ) + (!pair "loop_info" + (!type already_seen 1848) + (!srcfileloc "ada/gcc-interface/trans.cc" 211) + nil ) + (!pair "vec" + (!type already_seen 1845) + (!srcfileloc "ada/gcc-interface/trans.cc" 207) + nil ) + (!pair "range_check_info" + (!type already_seen 1844) + (!srcfileloc "ada/gcc-interface/trans.cc" 195) + nil ) + (!pair "vec" + (!type already_seen 465) + (!srcfileloc "ada/gcc-interface/trans.cc" 117) + nil ) + (!pair "parm_attr" + (!type already_seen 466) + (!srcfileloc "ada/gcc-interface/trans.cc" 113) + nil ) + (!pair "vinfo_t" + (!type already_seen 1838) + (!srcfileloc "ada/gcc-interface/decl.cc" 7847) + nil ) + (!pair "intrin_binding_t" + (!type already_seen 1836) + (!srcfileloc "ada/gcc-interface/decl.cc" 265) + nil ) + (!pair "dummy_type_hasher" + (!type already_seen 1833) + (!srcfileloc "ada/gcc-interface/decl.cc" 198) + nil ) + (!pair "hash_table" + (!type already_seen 1834) + (!srcfileloc "ada/gcc-interface/decl.cc" 198) + nil ) + (!pair "va_gc_atomic" + (!type already_seen 1830) + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + nil ) + (!pair "Entity_Id" + (!type already_seen 1831) + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + nil ) + (!pair "vec" + (!type already_seen 1828) + (!srcfileloc "ada/gcc-interface/decl.cc" 163) + nil ) + (!pair "value_annotation_hasher" + (!type already_seen 1825) + (!srcfileloc "ada/gcc-interface/decl.cc" 157) + nil ) + (!pair "hash_table" + (!type already_seen 1826) + (!srcfileloc "ada/gcc-interface/decl.cc" 157) + nil ) + (!pair "variant_desc" + (!type already_seen 1824) + (!srcfileloc "ada/gcc-interface/decl.cc" 132) + nil ) + (!pair "subst_pair" + (!type already_seen 1823) + (!srcfileloc "ada/gcc-interface/decl.cc" 114) + 
nil ) + (!pair "rewrite_fn" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/gigi.h" 964) + nil ) + (!pair "polymorphic_call_target_hash_type" + (!type already_seen 1803) + (!srcfileloc "ipa-devirt.cc" 2733) + nil ) + (!pair "polymorphic_call_target_hasher" + (!type already_seen 1802) + (!srcfileloc "ipa-devirt.cc" 2732) + nil ) + (!pair "hash_table" + (!type already_seen 1803) + (!srcfileloc "ipa-devirt.cc" 2732) + nil ) + (!pair "vec" + (!type already_seen 1797) + (!srcfileloc "ipa-devirt.cc" 498) + nil ) + (!pair "odr_hash_type" + (!type already_seen 1796) + (!srcfileloc "ipa-devirt.cc" 491) + nil ) + (!pair "odr_name_hasher" + (!type already_seen 1795) + (!srcfileloc "ipa-devirt.cc" 491) + nil ) + (!pair "hash_table" + (!type already_seen 1796) + (!srcfileloc "ipa-devirt.cc" 491) + nil ) + (!pair "vec" + (!type already_seen 1204) + (!srcfileloc "ipa-devirt.cc" 205) + nil ) + (!pair "tree_type_map_cache_hasher" + (!type already_seen 1780) + (!srcfileloc "ubsan.cc" 82) + nil ) + (!pair "hash_table" + (!type already_seen 1781) + (!srcfileloc "ubsan.cc" 82) + nil ) + (!pair "vtbl_map_iterator_type" + (!type already_seen 1769) + (!srcfileloc "vtable-verify.cc" 299) + nil ) + (!pair "vtbl_map_table_type::iterator" + (!type already_seen 1769) + (!srcfileloc "vtable-verify.cc" 299) + nil ) + (!pair "vtbl_map_table_type" + (!type already_seen 1768) + (!srcfileloc "vtable-verify.cc" 298) + nil ) + (!pair "vtbl_map_hasher" + (!type already_seen 1767) + (!srcfileloc "vtable-verify.cc" 298) + nil ) + (!pair "hash_table" + (!type already_seen 1768) + (!srcfileloc "vtable-verify.cc" 298) + nil ) + (!pair "fast_function_summary" + (!type already_seen 1759) + (!srcfileloc "ipa-fnsummary.h" 249) + nil ) + (!pair "ipa_fn_summary_t" + (!type already_seen 1758) + (!srcfileloc "ipa-fnsummary.h" 247) + nil ) + (!pair "vec" + (!type already_seen 1194) + (!srcfileloc "ipa-fnsummary.h" 200) + nil ) + (!pair "ipa_freqcounting_predicate" + (!type already_seen 1191) + 
(!srcfileloc "ipa-fnsummary.h" 196) + nil ) + (!pair "vec" + (!type already_seen 1190) + (!srcfileloc "ipa-fnsummary.h" 196) + nil ) + (!pair "vec" + (!type already_seen 1188) + (!srcfileloc "ipa-fnsummary.h" 193) + nil ) + (!pair "size_time_entry" + (!type already_seen 1187) + (!srcfileloc "ipa-fnsummary.h" 189) + nil ) + (!pair "auto_vec" + (!type already_seen 1186) + (!srcfileloc "ipa-fnsummary.h" 189) + nil ) + (!pair "ipa_fn_summary" + (!type already_seen 1177) + (!srcfileloc "ipa-fnsummary.h" 126) + nil ) + (!pair "ipa_predicate" + (!type already_seen 1193) + (!srcfileloc "ipa-fnsummary.h" 117) + nil ) + (!pair "ipa_hints" + (!type already_seen 2) + (!srcfileloc "ipa-fnsummary.h" 58) + nil ) + (!pair "clause_t" + (!type already_seen 2) + (!srcfileloc "ipa-predicate.h" 113) + nil ) + (!pair "conditions" + (!type already_seen 1179) + (!srcfileloc "ipa-predicate.h" 94) + nil ) + (!pair "condition" + (!type already_seen 1181) + (!srcfileloc "ipa-predicate.h" 94) + nil ) + (!pair "vec" + (!type already_seen 1180) + (!srcfileloc "ipa-predicate.h" 94) + nil ) + (!pair "expr_eval_ops" + (!type already_seen 1182) + (!srcfileloc "ipa-predicate.h" 46) + nil ) + (!pair "expr_eval_op" + (!type already_seen 1184) + (!srcfileloc "ipa-predicate.h" 46) + nil ) + (!pair "vec" + (!type already_seen 1183) + (!srcfileloc "ipa-predicate.h" 46) + nil ) + (!pair "lto_file_decl_data_ptr" + (!type already_seen 321) + (!srcfileloc "lto-streamer.h" 607) + nil ) + (!pair "lto_section" + (!type already_seen 340) + (!srcfileloc "lto-streamer.h" 600) + nil ) + (!pair "ld_plugin_symbol_resolution" + (!type already_seen 339) + (!srcfileloc "lto-streamer.h" 594) + nil ) + (!pair "hash_map" + (!type already_seen 338) + (!srcfileloc "lto-streamer.h" 594) + nil ) + (!pair "gcov_summary" + (!type already_seen 336) + (!srcfileloc "lto-streamer.h" 591) + nil ) + (!pair "res_pair" + (!type already_seen 335) + (!srcfileloc "lto-streamer.h" 588) + nil ) + (!pair "vec" + (!type already_seen 334) + 
(!srcfileloc "lto-streamer.h" 588) + nil ) + (!pair "decl_state_hasher" + (!type already_seen 331) + (!srcfileloc "lto-streamer.h" 567) + nil ) + (!pair "hash_table" + (!type already_seen 330) + (!srcfileloc "lto-streamer.h" 567) + nil ) + (!pair "lto_out_decl_state_ptr" + (!type already_seen 1686) + (!srcfileloc "lto-streamer.h" 538) + nil ) + (!pair "lto_in_decl_state_ptr" + (!type already_seen 324) + (!srcfileloc "lto-streamer.h" 502) + nil ) + (!pair "lto_symtab_encoder_t" + (!type already_seen 327) + (!srcfileloc "lto-streamer.h" 468) + nil ) + (!pair "lto_free_section_data_f" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 263) + nil ) + (!pair "lto_get_section_data_f" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 254) + nil ) + (!pair "ld_plugin_symbol_resolution_t" + (!type already_seen 2) + (!srcfileloc "lto-streamer.h" 242) + nil ) + (!pair "lto_decl_flags_t" + (!type already_seen 8) + (!srcfileloc "lto-streamer.h" 127) + nil ) + (!pair "cgraph_node_queue" + (!type already_seen 1735) + (!srcfileloc "trans-mem.cc" 4184) + nil ) + (!pair "vec" + (!type already_seen 1735) + (!srcfileloc "trans-mem.cc" 4184) + nil ) + (!pair "tm_wrapper_hasher" + (!type already_seen 1715) + (!srcfileloc "trans-mem.cc" 468) + nil ) + (!pair "hash_table" + (!type already_seen 1716) + (!srcfileloc "trans-mem.cc" 468) + nil ) + (!pair "function_summary" + (!type already_seen 1713) + (!srcfileloc "ipa-prop.h" 1065) + nil ) + (!pair "ipa_edge_args_sum_t" + (!type already_seen 1710) + (!srcfileloc "ipa-prop.h" 1033) + nil ) + (!pair "ipa_node_params_t" + (!type already_seen 1708) + (!srcfileloc "ipa-prop.h" 1011) + nil ) + (!pair "vec" + (!type already_seen 1159) + (!srcfileloc "ipa-prop.h" 962) + nil ) + (!pair "ipa_jump_func" + (!type already_seen 1124) + (!srcfileloc "ipa-prop.h" 961) + nil ) + (!pair "vec" + (!type already_seen 1157) + (!srcfileloc "ipa-prop.h" 961) + nil ) + (!pair "ipa_edge_args" + (!type already_seen 1155) + (!srcfileloc "ipa-prop.h" 945) 
+ nil ) + (!pair "ipcp_transformation" + (!type already_seen 1706) + (!srcfileloc "ipa-prop.h" 913) + nil ) + (!pair "ipa_vr" + (!type already_seen 1699) + (!srcfileloc "ipa-prop.h" 910) + nil ) + (!pair "vec" + (!type already_seen 1704) + (!srcfileloc "ipa-prop.h" 910) + nil ) + (!pair "vec" + (!type already_seen 1702) + (!srcfileloc "ipa-prop.h" 908) + nil ) + (!pair "ipa_agg_replacement_value" + (!type already_seen 1160) + (!srcfileloc "ipa-prop.h" 906) + nil ) + (!pair "vec" + (!type already_seen 1150) + (!srcfileloc "ipa-prop.h" 610) + nil ) + (!pair "vec" + (!type already_seen 1149) + (!srcfileloc "ipa-prop.h" 607) + nil ) + (!pair "ipa_param_descriptor" + (!type already_seen 1146) + (!srcfileloc "ipa-prop.h" 598) + nil ) + (!pair "vec" + (!type already_seen 1145) + (!srcfileloc "ipa-prop.h" 598) + nil ) + (!pair "ipa_node_params" + (!type already_seen 1143) + (!srcfileloc "ipa-prop.h" 591) + nil ) + (!pair "ipa_agg_jf_item" + (!type already_seen 1128) + (!srcfileloc "ipa-prop.h" 182) + nil ) + (!pair "vec" + (!type already_seen 1127) + (!srcfileloc "ipa-prop.h" 182) + nil ) + (!pair "int_range_max" + (!type already_seen 1696) + (!srcfileloc "value-range.h" 186) + nil ) + (!pair "255" + (!type already_seen 1697) + (!srcfileloc "value-range.h" 186) + nil ) + (!pair "int_range<255>" + (!type already_seen 1696) + (!srcfileloc "value-range.h" 186) + nil ) + (!pair "value_range" + (!type already_seen 1135) + (!srcfileloc "value-range.h" 182) + nil ) + (!pair "1" + (!type already_seen 1136) + (!srcfileloc "value-range.h" 182) + nil ) + (!pair "int_range<1>" + (!type already_seen 1135) + (!srcfileloc "value-range.h" 182) + nil ) + (!pair "int_range" + (!type already_seen 1695) + (!srcfileloc "value-range.h" 172) + nil ) + (!pair "irange" + (!type already_seen 1694) + (!srcfileloc "value-range.h" 142) + nil ) + (!pair "TRAILING_WIDE_INT_ACCESSOR" + (!type already_seen 1692) + (!srcfileloc "tree-ssanames.h" 52) + nil ) + (!pair "vec" + (!type already_seen 1689) + 
(!srcfileloc "tree-phinodes.cc" 70) + nil ) + (!pair "hash_map" + (!type already_seen 1687) + (!srcfileloc "cgraphclones.cc" 466) + nil ) + (!pair "uid_range_p" + (!type already_seen 1683) + (!srcfileloc "passes.cc" 1037) + nil ) + (!pair "char_ptr" + (!type already_seen 9) + (!srcfileloc "passes.cc" 917) + nil ) + (!pair "dllimport_hasher" + (!type already_seen 1665) + (!srcfileloc "config/i386/i386.cc" 11835) + nil ) + (!pair "hash_table" + (!type already_seen 1666) + (!srcfileloc "config/i386/i386.cc" 11835) + nil ) + (!pair "omp_declare_variant_alt_hasher" + (!type already_seen 1652) + (!srcfileloc "omp-general.cc" 2123) + nil ) + (!pair "hash_table" + (!type already_seen 1653) + (!srcfileloc "omp-general.cc" 2123) + nil ) + (!pair "omp_declare_variant_hasher" + (!type already_seen 1649) + (!srcfileloc "omp-general.cc" 2101) + nil ) + (!pair "hash_table" + (!type already_seen 1650) + (!srcfileloc "omp-general.cc" 2101) + nil ) + (!pair "omp_declare_variant_entry" + (!type already_seen 1645) + (!srcfileloc "omp-general.cc" 2052) + nil ) + (!pair "vec" + (!type already_seen 1646) + (!srcfileloc "omp-general.cc" 2052) + nil ) + (!pair "use_optype_p" + (!type already_seen 402) + (!srcfileloc "tree-ssa-operands.h" 42) + nil ) + (!pair "use_operand_p" + (!type already_seen 558) + (!srcfileloc "tree-ssa-operands.h" 30) + nil ) + (!pair "ssa_use_operand_t" + (!type already_seen 557) + (!srcfileloc "tree-ssa-operands.h" 30) + nil ) + (!pair "def_operand_p" + (!type already_seen 24) + (!srcfileloc "tree-ssa-operands.h" 27) + nil ) + (!pair "scev_info_hasher" + (!type already_seen 1634) + (!srcfileloc "tree-scalar-evolution.cc" 312) + nil ) + (!pair "hash_table" + (!type already_seen 1635) + (!srcfileloc "tree-scalar-evolution.cc" 312) + nil ) + (!pair "mem_addr_template" + (!type already_seen 1594) + (!srcfileloc "tree-ssa-address.cc" 95) + nil ) + (!pair "vec" + (!type already_seen 1595) + (!srcfileloc "tree-ssa-address.cc" 95) + nil ) + (!pair "treemple" + (!type 
already_seen 1582) + (!srcfileloc "tree-eh.cc" 53) + nil ) + (!pair "tm_restart_hasher" + (!type already_seen 406) + (!srcfileloc "gimple-ssa.h" 114) + nil ) + (!pair "hash_table" + (!type already_seen 405) + (!srcfileloc "gimple-ssa.h" 114) + nil ) + (!pair "ssa_name_hasher" + (!type already_seen 397) + (!srcfileloc "gimple-ssa.h" 96) + nil ) + (!pair "hash_table" + (!type already_seen 396) + (!srcfileloc "gimple-ssa.h" 96) + nil ) + (!pair "hash_map" + (!type already_seen 394) + (!srcfileloc "gimple-ssa.h" 84) + nil ) + (!pair "elt_t" + (!type already_seen 1579) + (!srcfileloc "gimple.h" 1669) + nil ) + (!pair "gimple_seq_node" + (!type already_seen 281) + (!srcfileloc "gimple.h" 28) + nil ) + (!pair "tm_clone_hasher" + (!type already_seen 1572) + (!srcfileloc "varasm.cc" 6374) + nil ) + (!pair "hash_table" + (!type already_seen 1573) + (!srcfileloc "varasm.cc" 6374) + nil ) + (!pair "const_rtx_desc_hasher" + (!type already_seen 1007) + (!srcfileloc "varasm.cc" 3764) + nil ) + (!pair "hash_table" + (!type already_seen 1006) + (!srcfileloc "varasm.cc" 3764) + nil ) + (!pair "tree_descriptor_hasher" + (!type already_seen 1091) + (!srcfileloc "varasm.cc" 3093) + nil ) + (!pair "hash_table" + (!type already_seen 1566) + (!srcfileloc "varasm.cc" 3093) + nil ) + (!pair "object_block_hasher" + (!type already_seen 1562) + (!srcfileloc "varasm.cc" 204) + nil ) + (!pair "hash_table" + (!type already_seen 1563) + (!srcfileloc "varasm.cc" 204) + nil ) + (!pair "section_hasher" + (!type already_seen 1559) + (!srcfileloc "varasm.cc" 193) + nil ) + (!pair "hash_table" + (!type already_seen 1560) + (!srcfileloc "varasm.cc" 193) + nil ) + (!pair "tree_vec_map_cache_hasher" + (!type already_seen 929) + (!srcfileloc "tree.cc" 248) + nil ) + (!pair "hash_table" + (!type already_seen 1556) + (!srcfileloc "tree.cc" 248) + nil ) + (!pair "tree_decl_map_cache_hasher" + (!type already_seen 928) + (!srcfileloc "tree.cc" 242) + nil ) + (!pair "hash_table" + (!type already_seen 1554) + 
(!srcfileloc "tree.cc" 242) + nil ) + (!pair "cl_option_hasher" + (!type already_seen 1551) + (!srcfileloc "tree.cc" 236) + nil ) + (!pair "hash_table" + (!type already_seen 1552) + (!srcfileloc "tree.cc" 236) + nil ) + (!pair "poly_int_cst_hasher" + (!type already_seen 1548) + (!srcfileloc "tree.cc" 220) + nil ) + (!pair "hash_table" + (!type already_seen 1549) + (!srcfileloc "tree.cc" 220) + nil ) + (!pair "int_cst_hasher" + (!type already_seen 1545) + (!srcfileloc "tree.cc" 209) + nil ) + (!pair "hash_table" + (!type already_seen 1546) + (!srcfileloc "tree.cc" 209) + nil ) + (!pair "type_cache_hasher" + (!type already_seen 1542) + (!srcfileloc "tree.cc" 198) + nil ) + (!pair "hash_table" + (!type already_seen 1543) + (!srcfileloc "tree.cc" 198) + nil ) + (!pair "block_info" + (!type already_seen 1532) + (!srcfileloc "reg-stack.cc" 220) + nil ) + (!pair "stack_ptr" + (!type already_seen 1530) + (!srcfileloc "reg-stack.cc" 207) + nil ) + (!pair "fixup_vertex_p" + (!type already_seen 1524) + (!srcfileloc "mcf.cc" 103) + nil ) + (!pair "fixup_vertex_type" + (!type already_seen 1523) + (!srcfileloc "mcf.cc" 103) + nil ) + (!pair "fixup_edge_p" + (!type already_seen 1522) + (!srcfileloc "mcf.cc" 94) + nil ) + (!pair "fixup_edge_type" + (!type already_seen 1521) + (!srcfileloc "mcf.cc" 94) + nil ) + (!pair "libfunc_decl_hasher" + (!type already_seen 1513) + (!srcfileloc "optabs-libfuncs.cc" 720) + nil ) + (!pair "hash_table" + (!type already_seen 1514) + (!srcfileloc "optabs-libfuncs.cc" 720) + nil ) + (!pair "vec_modify_pair_heap" + (!type already_seen 1502) + (!srcfileloc "gcse.cc" 621) + nil ) + (!pair "modify_pair" + (!type already_seen 1503) + (!srcfileloc "gcse.cc" 621) + nil ) + (!pair "vec" + (!type already_seen 1502) + (!srcfileloc "gcse.cc" 621) + nil ) + (!pair "vec_rtx_heap" + (!type already_seen 1501) + (!srcfileloc "gcse.cc" 620) + nil ) + (!pair "vec" + (!type already_seen 1501) + (!srcfileloc "gcse.cc" 620) + nil ) + (!pair "occr_t" + (!type 
already_seen 1496) + (!srcfileloc "gcse.cc" 306) + nil ) + (!pair "user_struct" + (!type already_seen 1492) + (!srcfileloc "ggc-tests.cc" 388) + nil ) + (!pair "test_node" + (!type already_seen 1490) + (!srcfileloc "ggc-tests.cc" 323) + nil ) + (!pair "some_other_subclass" + (!type already_seen 1488) + (!srcfileloc "ggc-tests.cc" 261) + nil ) + (!pair "some_subclass" + (!type already_seen 1486) + (!srcfileloc "ggc-tests.cc" 250) + nil ) + (!pair "example_base" + (!type already_seen 1484) + (!srcfileloc "ggc-tests.cc" 225) + nil ) + (!pair "test_of_union" + (!type already_seen 1481) + (!srcfileloc "ggc-tests.cc" 134) + nil ) + (!pair "test_other" + (!type already_seen 914) + (!srcfileloc "ggc-tests.cc" 128) + nil ) + (!pair "test_of_length" + (!type already_seen 1478) + (!srcfileloc "ggc-tests.cc" 68) + nil ) + (!pair "test_struct" + (!type already_seen 912) + (!srcfileloc "ggc-tests.cc" 42) + nil ) + (!pair "ehspec_hash_type" + (!type already_seen 1475) + (!srcfileloc "except.cc" 764) + nil ) + (!pair "ehspec_hasher" + (!type already_seen 1474) + (!srcfileloc "except.cc" 764) + nil ) + (!pair "hash_table" + (!type already_seen 1475) + (!srcfileloc "except.cc" 764) + nil ) + (!pair "ttypes_hash_type" + (!type already_seen 1473) + (!srcfileloc "except.cc" 729) + nil ) + (!pair "ttypes_filter_hasher" + (!type already_seen 1472) + (!srcfileloc "except.cc" 729) + nil ) + (!pair "hash_table" + (!type already_seen 1473) + (!srcfileloc "except.cc" 729) + nil ) + (!pair "action_hash_type" + (!type already_seen 1469) + (!srcfileloc "except.cc" 210) + nil ) + (!pair "action_record_hasher" + (!type already_seen 1468) + (!srcfileloc "except.cc" 210) + nil ) + (!pair "hash_table" + (!type already_seen 1469) + (!srcfileloc "except.cc" 210) + nil ) + (!pair "tree_hash" + (!type already_seen 933) + (!srcfileloc "except.cc" 151) + nil ) + (!pair "hash_map" + (!type already_seen 1465) + (!srcfileloc "except.cc" 151) + nil ) + (!pair "initial_value_pair" + (!type already_seen 1420) + 
(!srcfileloc "function.cc" 1265) + nil ) + (!pair "temp_address_hasher" + (!type already_seen 1455) + (!srcfileloc "function.cc" 608) + nil ) + (!pair "hash_table" + (!type already_seen 1456) + (!srcfileloc "function.cc" 608) + nil ) + (!pair "insn_cache_hasher" + (!type already_seen 1451) + (!srcfileloc "function.cc" 131) + nil ) + (!pair "hash_table" + (!type already_seen 1452) + (!srcfileloc "function.cc" 131) + nil ) + (!pair "by_pieces_constfn" + (!type already_seen 2) + (!srcfileloc "expr.h" 111) + nil ) + (!pair "sepops" + (!type already_seen 1449) + (!srcfileloc "expr.h" 55) + nil ) + (!pair "duplicate_eh_regions_map" + (!type already_seen 2) + (!srcfileloc "except.h" 247) + nil ) + (!pair "hash_map" + (!type already_seen 372) + (!srcfileloc "except.h" 204) + nil ) + (!pair "vec" + (!type already_seen 370) + (!srcfileloc "except.h" 200) + nil ) + (!pair "vec" + (!type already_seen 368) + (!srcfileloc "except.h" 197) + nil ) + (!pair "eh_region" + (!type already_seen 355) + (!srcfileloc "except.h" 184) + nil ) + (!pair "eh_catch" + (!type already_seen 359) + (!srcfileloc "except.h" 183) + nil ) + (!pair "eh_landing_pad" + (!type already_seen 363) + (!srcfileloc "except.h" 182) + nil ) + (!pair "twi" + (!type already_seen 907) + (!srcfileloc "emit-rtl.cc" 772) + nil ) + (!pair "const_fixed_hasher" + (!type already_seen 1439) + (!srcfileloc "emit-rtl.cc" 188) + nil ) + (!pair "hash_table" + (!type already_seen 1440) + (!srcfileloc "emit-rtl.cc" 188) + nil ) + (!pair "const_double_hasher" + (!type already_seen 1436) + (!srcfileloc "emit-rtl.cc" 179) + nil ) + (!pair "hash_table" + (!type already_seen 1437) + (!srcfileloc "emit-rtl.cc" 179) + nil ) + (!pair "reg_attr_hasher" + (!type already_seen 1433) + (!srcfileloc "emit-rtl.cc" 170) + nil ) + (!pair "hash_table" + (!type already_seen 1434) + (!srcfileloc "emit-rtl.cc" 170) + nil ) + (!pair "const_poly_int_hasher" + (!type already_seen 1430) + (!srcfileloc "emit-rtl.cc" 161) + nil ) + (!pair "hash_table" + 
(!type already_seen 1431) + (!srcfileloc "emit-rtl.cc" 161) + nil ) + (!pair "const_wide_int_hasher" + (!type already_seen 1427) + (!srcfileloc "emit-rtl.cc" 151) + nil ) + (!pair "hash_table" + (!type already_seen 1428) + (!srcfileloc "emit-rtl.cc" 151) + nil ) + (!pair "const_int_hasher" + (!type already_seen 1424) + (!srcfileloc "emit-rtl.cc" 143) + nil ) + (!pair "hash_table" + (!type already_seen 1425) + (!srcfileloc "emit-rtl.cc" 143) + nil ) + (!pair "vec" + (!type already_seen 1421) + (!srcfileloc "emit-rtl.h" 148) + nil ) + (!pair "rtx_note" + (!type already_seen 754) + (!srcfileloc "emit-rtl.h" 128) + nil ) + (!pair "rtl_ssa::function_info" + (!type already_seen 1415) + (!srcfileloc "emit-rtl.h" 77) + nil ) + (!pair "predefined_function_abi" + (!type already_seen 902) + (!srcfileloc "emit-rtl.h" 75) + nil ) + (!pair "temp_slot_p" + (!type already_seen 1413) + (!srcfileloc "emit-rtl.h" 24) + nil ) + (!pair "elem_op_func" + (!type already_seen 2) + (!srcfileloc "tree-vect-generic.cc" 168) + nil ) + (!pair "vec" + (!type already_seen 1406) + (!srcfileloc "btfout.cc" 105) + nil ) + (!pair "btf_datasec_t" + (!type already_seen 1405) + (!srcfileloc "btfout.cc" 86) + nil ) + (!pair "unsigned" + (!type already_seen 1404) + (!srcfileloc "btfout.cc" 73) + nil ) + (!pair "hash_map" + (!type already_seen 1402) + (!srcfileloc "btfout.cc" 73) + nil ) + (!pair "ctf_dvd_preprocess_arg_t" + (!type already_seen 1401) + (!srcfileloc "ctfout.cc" 72) + nil ) + (!pair "ctf_dtd_preprocess_arg_t" + (!type already_seen 1400) + (!srcfileloc "ctfout.cc" 66) + nil ) + (!pair "ctf_container_ref" + (!type already_seen 1399) + (!srcfileloc "ctfc.h" 339) + nil ) + (!pair "ctf_container_t" + (!type already_seen 1398) + (!srcfileloc "ctfc.h" 332) + nil ) + (!pair "ctfc_dvd_hasher" + (!type already_seen 1393) + (!srcfileloc "ctfc.h" 276) + nil ) + (!pair "hash_table" + (!type already_seen 1396) + (!srcfileloc "ctfc.h" 276) + nil ) + (!pair "ctfc_dtd_hasher" + (!type already_seen 1392) + 
(!srcfileloc "ctfc.h" 274) + nil ) + (!pair "hash_table" + (!type already_seen 1394) + (!srcfileloc "ctfc.h" 274) + nil ) + (!pair "ctf_srcloc_ref" + (!type already_seen 1391) + (!srcfileloc "ctfc.h" 206) + nil ) + (!pair "ctf_srcloc_t" + (!type already_seen 1390) + (!srcfileloc "ctfc.h" 204) + nil ) + (!pair "ctf_dtdef_ref" + (!type already_seen 1385) + (!srcfileloc "ctfc.h" 195) + nil ) + (!pair "ctf_dvdef_ref" + (!type already_seen 1388) + (!srcfileloc "ctfc.h" 194) + nil ) + (!pair "ctf_dvdef_t" + (!type already_seen 1387) + (!srcfileloc "ctfc.h" 192) + nil ) + (!pair "ctf_dtdef_t" + (!type already_seen 1384) + (!srcfileloc "ctfc.h" 179) + nil ) + (!pair "ctf_func_arg_t" + (!type already_seen 1381) + (!srcfileloc "ctfc.h" 150) + nil ) + (!pair "ctf_dmdef_t" + (!type already_seen 1379) + (!srcfileloc "ctfc.h" 138) + nil ) + (!pair "ctf_itype_t" + (!type already_seen 1378) + (!srcfileloc "ctfc.h" 119) + nil ) + (!pair "ctf_sliceinfo_t" + (!type already_seen 1376) + (!srcfileloc "ctfc.h" 103) + nil ) + (!pair "ctf_funcinfo_t" + (!type already_seen 1375) + (!srcfileloc "ctfc.h" 96) + nil ) + (!pair "ctf_arinfo_t" + (!type already_seen 1374) + (!srcfileloc "ctfc.h" 87) + nil ) + (!pair "ctf_encoding_t" + (!type already_seen 1373) + (!srcfileloc "ctfc.h" 78) + nil ) + (!pair "ctf_strtable_t" + (!type already_seen 1372) + (!srcfileloc "ctfc.h" 68) + nil ) + (!pair "ctf_string_t" + (!type already_seen 1370) + (!srcfileloc "ctfc.h" 57) + nil ) + (!pair "ctf_id_t" + (!type already_seen 2) + (!srcfileloc "ctfc.h" 49) + nil ) + (!pair "loc_list_hash_type" + (!type already_seen 1369) + (!srcfileloc "dwarf2out.cc" 31902) + nil ) + (!pair "loc_list_hasher" + (!type already_seen 1368) + (!srcfileloc "dwarf2out.cc" 31902) + nil ) + (!pair "hash_table" + (!type already_seen 1369) + (!srcfileloc "dwarf2out.cc" 31902) + nil ) + (!pair "macinfo_hash_type" + (!type already_seen 1366) + (!srcfileloc "dwarf2out.cc" 28847) + nil ) + (!pair "macinfo_entry_hasher" + (!type already_seen 
1365) + (!srcfileloc "dwarf2out.cc" 28847) + nil ) + (!pair "hash_table" + (!type already_seen 1366) + (!srcfileloc "dwarf2out.cc" 28847) + nil ) + (!pair "inline_entry_data_hasher" + (!type already_seen 1362) + (!srcfileloc "dwarf2out.cc" 24293) + nil ) + (!pair "hash_table" + (!type already_seen 1363) + (!srcfileloc "dwarf2out.cc" 24293) + nil ) + (!pair "external_ref_hash_type" + (!type already_seen 1355) + (!srcfileloc "dwarf2out.cc" 9084) + nil ) + (!pair "external_ref_hasher" + (!type already_seen 1354) + (!srcfileloc "dwarf2out.cc" 9084) + nil ) + (!pair "hash_table" + (!type already_seen 1355) + (!srcfileloc "dwarf2out.cc" 9084) + nil ) + (!pair "decl_hash_type" + (!type already_seen 1352) + (!srcfileloc "dwarf2out.cc" 8326) + nil ) + (!pair "decl_table_entry_hasher" + (!type already_seen 1351) + (!srcfileloc "dwarf2out.cc" 8326) + nil ) + (!pair "hash_table" + (!type already_seen 1352) + (!srcfileloc "dwarf2out.cc" 8326) + nil ) + (!pair "sym_off_pair" + (!type already_seen 1347) + (!srcfileloc "dwarf2out.cc" 5938) + nil ) + (!pair "hash_map" + (!type already_seen 1348) + (!srcfileloc "dwarf2out.cc" 5938) + nil ) + (!pair "addr_hasher" + (!type already_seen 1344) + (!srcfileloc "dwarf2out.cc" 5072) + nil ) + (!pair "hash_table" + (!type already_seen 1345) + (!srcfileloc "dwarf2out.cc" 5072) + nil ) + (!pair "vec" + (!type already_seen 1338) + (!srcfileloc "dwarf2out.cc" 3707) + nil ) + (!pair "dw_ranges_by_label" + (!type already_seen 1296) + (!srcfileloc "dwarf2out.cc" 3690) + nil ) + (!pair "vec" + (!type already_seen 1336) + (!srcfileloc "dwarf2out.cc" 3690) + nil ) + (!pair "dw_ranges" + (!type already_seen 1294) + (!srcfileloc "dwarf2out.cc" 3687) + nil ) + (!pair "vec" + (!type already_seen 1334) + (!srcfileloc "dwarf2out.cc" 3687) + nil ) + (!pair "vec" + (!type already_seen 1332) + (!srcfileloc "dwarf2out.cc" 3677) + nil ) + (!pair "vec" + (!type already_seen 1330) + (!srcfileloc "dwarf2out.cc" 3669) + nil ) + (!pair "vec" + (!type already_seen 
1328) + (!srcfileloc "dwarf2out.cc" 3661) + nil ) + (!pair "dw_line_info_table" + (!type already_seen 1290) + (!srcfileloc "dwarf2out.cc" 3654) + nil ) + (!pair "dw_loc_list_hasher" + (!type already_seen 1325) + (!srcfileloc "dwarf2out.cc" 3636) + nil ) + (!pair "hash_table" + (!type already_seen 1326) + (!srcfileloc "dwarf2out.cc" 3636) + nil ) + (!pair "cached_dw_loc_list" + (!type already_seen 1324) + (!srcfileloc "dwarf2out.cc" 3624) + nil ) + (!pair "decl_loc_hasher" + (!type already_seen 1321) + (!srcfileloc "dwarf2out.cc" 3605) + nil ) + (!pair "hash_table" + (!type already_seen 1322) + (!srcfileloc "dwarf2out.cc" 3605) + nil ) + (!pair "var_loc_list" + (!type already_seen 1318) + (!srcfileloc "dwarf2out.cc" 3583) + nil ) + (!pair "die_arg_entry" + (!type already_seen 1315) + (!srcfileloc "dwarf2out.cc" 3544) + nil ) + (!pair "block_die_hasher" + (!type already_seen 1312) + (!srcfileloc "dwarf2out.cc" 3539) + nil ) + (!pair "hash_table" + (!type already_seen 1313) + (!srcfileloc "dwarf2out.cc" 3539) + nil ) + (!pair "variable_value_hasher" + (!type already_seen 1309) + (!srcfileloc "dwarf2out.cc" 3529) + nil ) + (!pair "hash_table" + (!type already_seen 1310) + (!srcfileloc "dwarf2out.cc" 3529) + nil ) + (!pair "vec" + (!type already_seen 1306) + (!srcfileloc "dwarf2out.cc" 3516) + nil ) + (!pair "decl_die_hasher" + (!type already_seen 1303) + (!srcfileloc "dwarf2out.cc" 3512) + nil ) + (!pair "hash_table" + (!type already_seen 1304) + (!srcfileloc "dwarf2out.cc" 3512) + nil ) + (!pair "dwarf_file_hasher" + (!type already_seen 1300) + (!srcfileloc "dwarf2out.cc" 3501) + nil ) + (!pair "hash_table" + (!type already_seen 1301) + (!srcfileloc "dwarf2out.cc" 3501) + nil ) + (!pair "skeleton_chain_node" + (!type already_seen 1299) + (!srcfileloc "dwarf2out.cc" 3293) + nil ) + (!pair "limbo_die_node" + (!type already_seen 1297) + (!srcfileloc "dwarf2out.cc" 3285) + nil ) + (!pair "macinfo_entry" + (!type already_seen 1295) + (!srcfileloc "dwarf2out.cc" 3257) + nil 
) + (!pair "pubname_entry" + (!type already_seen 1293) + (!srcfileloc "dwarf2out.cc" 3229) + nil ) + (!pair "die_node" + (!type already_seen 488) + (!srcfileloc "dwarf2out.cc" 3198) + nil ) + (!pair "vec" + (!type already_seen 494) + (!srcfileloc "dwarf2out.cc" 3176) + nil ) + (!pair "comdat_type_node" + (!type already_seen 491) + (!srcfileloc "dwarf2out.cc" 3173) + nil ) + (!pair "vec" + (!type already_seen 1288) + (!srcfileloc "dwarf2out.cc" 3123) + nil ) + (!pair "dw_line_info_entry" + (!type already_seen 1287) + (!srcfileloc "dwarf2out.cc" 3081) + nil ) + (!pair "dw_offset" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3035) + nil ) + (!pair "dw_loc_list_node" + (!type already_seen 486) + (!srcfileloc "dwarf2out.cc" 1391) + nil ) + (!pair "addr_table_entry" + (!type already_seen 482) + (!srcfileloc "dwarf2out.cc" 1364) + nil ) + (!pair "var_loc_view" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 1356) + nil ) + (!pair "vec" + (!type already_seen 1285) + (!srcfileloc "dwarf2out.cc" 275) + nil ) + (!pair "indirect_string_hasher" + (!type already_seen 1282) + (!srcfileloc "dwarf2out.cc" 233) + nil ) + (!pair "hash_table" + (!type already_seen 1283) + (!srcfileloc "dwarf2out.cc" 233) + nil ) + (!pair "vec" + (!type already_seen 1280) + (!srcfileloc "dwarf2out.cc" 215) + nil ) + (!pair "reg_saved_in_data" + (!type already_seen 1273) + (!srcfileloc "dwarf2cfi.cc" 195) + nil ) + (!pair "dw_cfi_row" + (!type already_seen 1271) + (!srcfileloc "dwarf2cfi.cc" 193) + nil ) + (!pair "dw_cfa_location" + (!type already_seen 513) + (!srcfileloc "dwarf2cfi.cc" 66) + nil ) + (!pair "hash_map" + (!type already_seen 1269) + (!srcfileloc "dwarf2asm.cc" 911) + nil ) + (!pair "dw_attr_node" + (!type already_seen 495) + (!srcfileloc "dwarf2out.h" 435) + nil ) + (!pair "dw_val_node" + (!type already_seen 479) + (!srcfileloc "dwarf2out.h" 297) + nil ) + (!pair "dw_discr_value" + (!type already_seen 508) + (!srcfileloc "dwarf2out.h" 276) + nil ) + (!pair "dw_vec_const" 
+ (!type already_seen 500) + (!srcfileloc "dwarf2out.h" 256) + nil ) + (!pair "cfa_reg" + (!type already_seen 514) + (!srcfileloc "dwarf2out.h" 133) + nil ) + (!pair "dw_fde_ref" + (!type already_seen 470) + (!srcfileloc "dwarf2out.h" 70) + nil ) + (!pair "cfi_vec" + (!type already_seen 472) + (!srcfileloc "dwarf2out.h" 68) + nil ) + (!pair "vec" + (!type already_seen 473) + (!srcfileloc "dwarf2out.h" 68) + nil ) + (!pair "dw_cfi_oprnd" + (!type already_seen 476) + (!srcfileloc "dwarf2out.h" 57) + nil ) + (!pair "wide_int_ptr" + (!type already_seen 496) + (!srcfileloc "dwarf2out.h" 33) + nil ) + (!pair "dw_discr_list_ref" + (!type already_seen 510) + (!srcfileloc "dwarf2out.h" 32) + nil ) + (!pair "dw_loc_list_ref" + (!type already_seen 485) + (!srcfileloc "dwarf2out.h" 31) + nil ) + (!pair "dw_loc_descr_ref" + (!type already_seen 477) + (!srcfileloc "dwarf2out.h" 30) + nil ) + (!pair "dw_cfi_ref" + (!type already_seen 474) + (!srcfileloc "dwarf2out.h" 29) + nil ) + (!pair "dw_val_ref" + (!type already_seen 480) + (!srcfileloc "dwarf2out.h" 28) + nil ) + (!pair "const_dw_die_ref" + (!type already_seen 487) + (!srcfileloc "dwarf2out.h" 26) + nil ) + (!pair "dw_die_ref" + (!type already_seen 487) + (!srcfileloc "dwarf2out.h" 25) + nil ) + (!pair "nowarn_map_t" + (!type already_seen 1265) + (!srcfileloc "diagnostic-spec.h" 135) + nil ) + (!pair "nowarn_spec_t" + (!type already_seen 1264) + (!srcfileloc "diagnostic-spec.h" 135) + nil ) + (!pair "hash_map" + (!type already_seen 1265) + (!srcfileloc "diagnostic-spec.h" 135) + nil ) + (!pair "modref_tree" + (!type already_seen 1263) + (!srcfileloc "ipa-modref-tree.h" 738) + nil ) + (!pair "modref_base_node" + (!type already_seen 1262) + (!srcfileloc "ipa-modref-tree.h" 288) + nil ) + (!pair "modref_ref_node" + (!type already_seen 1261) + (!srcfileloc "ipa-modref-tree.h" 205) + nil ) + (!pair "modref_summary_lto" + (!type already_seen 1241) + (!srcfileloc "ipa-modref.cc" 368) + nil ) + (!pair "modref_records_lto" + (!type 
already_seen 1244) + (!srcfileloc "ipa-modref.cc" 350) + nil ) + (!pair "modref_tree" + (!type already_seen 1244) + (!srcfileloc "ipa-modref.cc" 350) + nil ) + (!pair "fast_function_summary" + (!type already_seen 1248) + (!srcfileloc "ipa-modref.cc" 272) + nil ) + (!pair "fast_function_summary" + (!type already_seen 1246) + (!srcfileloc "ipa-modref.cc" 260) + nil ) + (!pair "modref_summaries_lto" + (!type already_seen 1245) + (!srcfileloc "ipa-modref.cc" 255) + nil ) + (!pair "modref_summaries" + (!type already_seen 1240) + (!srcfileloc "ipa-modref.cc" 230) + nil ) + (!pair "modref_summary" + (!type already_seen 1233) + (!srcfileloc "ipa-modref.h" 67) + nil ) + (!pair "auto_vec" + (!type already_seen 1232) + (!srcfileloc "ipa-modref.h" 34) + nil ) + (!pair "modref_access_node" + (!type already_seen 1231) + (!srcfileloc "ipa-modref.h" 33) + nil ) + (!pair "auto_vec" + (!type already_seen 1230) + (!srcfileloc "ipa-modref.h" 33) + nil ) + (!pair "eaf_flags_t" + (!type already_seen 2) + (!srcfileloc "ipa-modref.h" 24) + nil ) + (!pair "modref_records" + (!type already_seen 1228) + (!srcfileloc "ipa-modref.h" 23) + nil ) + (!pair "modref_tree" + (!type already_seen 1228) + (!srcfileloc "ipa-modref.h" 23) + nil ) + (!pair "ipa_sra_function_summaries" + (!type already_seen 1219) + (!srcfileloc "ipa-sra.cc" 383) + nil ) + (!pair "isra_param_desc" + (!type already_seen 1212) + (!srcfileloc "ipa-sra.cc" 256) + nil ) + (!pair "vec" + (!type already_seen 1216) + (!srcfileloc "ipa-sra.cc" 256) + nil ) + (!pair "isra_func_summary" + (!type already_seen 1214) + (!srcfileloc "ipa-sra.cc" 240) + nil ) + (!pair "vec" + (!type already_seen 1210) + (!srcfileloc "ipa-sra.cc" 167) + nil ) + (!pair "ipa_adjusted_param" + (!type already_seen 1075) + (!srcfileloc "ipa-param-manipulation.h" 254) + nil ) + (!pair "vec" + (!type already_seen 1074) + (!srcfileloc "ipa-param-manipulation.h" 254) + nil ) + (!pair "odr_type" + (!type already_seen 1203) + (!srcfileloc "ipa-utils.h" 62) + nil ) + 
(!pair "odr_type_d" + (!type already_seen 1202) + (!srcfileloc "ipa-utils.h" 62) + nil ) + (!pair "ipa_vr_ggc_hash_traits" + (!type already_seen 1119) + (!srcfileloc "ipa-prop.cc" 156) + nil ) + (!pair "hash_table" + (!type already_seen 1120) + (!srcfileloc "ipa-prop.cc" 156) + nil ) + (!pair "ipa_bit_ggc_hash_traits" + (!type already_seen 1116) + (!srcfileloc "ipa-prop.cc" 109) + nil ) + (!pair "hash_table" + (!type already_seen 1117) + (!srcfileloc "ipa-prop.cc" 109) + nil ) + (!pair "function_version_hasher" + (!type already_seen 1112) + (!srcfileloc "cgraph.cc" 122) + nil ) + (!pair "hash_table" + (!type already_seen 1113) + (!srcfileloc "cgraph.cc" 122) + nil ) + (!pair "vec" + (!type already_seen 1106) + (!srcfileloc "alias.cc" 280) + nil ) + (!pair "alias_set_hash" + (!type already_seen 1101) + (!srcfileloc "alias.cc" 148) + nil ) + (!pair "hash_map" + (!type already_seen 1102) + (!srcfileloc "alias.cc" 148) + nil ) + (!pair "ipa_param_adjustments" + (!type already_seen 1072) + (!srcfileloc "symtab-clones.h" 36) + nil ) + (!pair "vec" + (!type already_seen 1070) + (!srcfileloc "symtab-clones.h" 34) + nil ) + (!pair "clone_info" + (!type already_seen 1067) + (!srcfileloc "symtab-clones.h" 27) + nil ) + (!pair "thunk_infos_t" + (!type already_seen 1096) + (!srcfileloc "symtab-thunks.cc" 78) + nil ) + (!pair "unprocessed_thunk" + (!type already_seen 1093) + (!srcfileloc "symtab-thunks.cc" 62) + nil ) + (!pair "vec" + (!type already_seen 1094) + (!srcfileloc "symtab-thunks.cc" 62) + nil ) + (!pair "thunk_info" + (!type already_seen 1063) + (!srcfileloc "symtab-thunks.h" 38) + nil ) + (!pair "hash_set" + (!type already_seen 1089) + (!srcfileloc "cgraph.h" 2503) + nil ) + (!pair "FILE" + (!type already_seen 1088) + (!srcfileloc "cgraph.h" 2499) + nil ) + (!pair "symbol_priority_map" + (!type already_seen 1036) + (!srcfileloc "cgraph.h" 2497) + nil ) + (!pair "hash_map" + (!type already_seen 1086) + (!srcfileloc "cgraph.h" 2497) + nil ) + (!pair "asmname_hasher" + 
(!type already_seen 1062) + (!srcfileloc "cgraph.h" 2494) + nil ) + (!pair "hash_table" + (!type already_seen 1084) + (!srcfileloc "cgraph.h" 2494) + nil ) + (!pair "section_name_hasher" + (!type already_seen 1034) + (!srcfileloc "cgraph.h" 2491) + nil ) + (!pair "hash_table" + (!type already_seen 1082) + (!srcfileloc "cgraph.h" 2491) + nil ) + (!pair "vec" + (!type already_seen 1080) + (!srcfileloc "cgraph.h" 2461) + nil ) + (!pair "cgraph_2node_hook_list" + (!type already_seen 1060) + (!srcfileloc "cgraph.h" 2381) + nil ) + (!pair "cgraph_2edge_hook_list" + (!type already_seen 1058) + (!srcfileloc "cgraph.h" 2375) + nil ) + (!pair "varpool_node_hook_list" + (!type already_seen 1056) + (!srcfileloc "cgraph.h" 2354) + nil ) + (!pair "cgraph_node_hook_list" + (!type already_seen 1054) + (!srcfileloc "cgraph.h" 2347) + nil ) + (!pair "cgraph_edge_hook_list" + (!type already_seen 1052) + (!srcfileloc "cgraph.h" 2340) + nil ) + (!pair "symbol_table" + (!type already_seen 1078) + (!srcfileloc "cgraph.h" 2214) + nil ) + (!pair "clone_summary" + (!type already_seen 1076) + (!srcfileloc "cgraph.h" 2205) + nil ) + (!pair "function_summary" + (!type already_seen 1076) + (!srcfileloc "cgraph.h" 2205) + nil ) + (!pair "thunk_summary" + (!type already_seen 1065) + (!srcfileloc "cgraph.h" 2201) + nil ) + (!pair "function_summary" + (!type already_seen 1065) + (!srcfileloc "cgraph.h" 2201) + nil ) + (!pair "cgraph_2node_hook" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2154) + nil ) + (!pair "cgraph_2edge_hook" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2153) + nil ) + (!pair "varpool_node_hook" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2152) + nil ) + (!pair "cgraph_node_hook" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2151) + nil ) + (!pair "cgraph_edge_hook" + (!type already_seen 2) + (!srcfileloc "cgraph.h" 2150) + nil ) + (!pair "asm_node" + (!type already_seen 1050) + (!srcfileloc "cgraph.h" 2123) + nil ) + (!pair "availability" + (!type 
already_seen 1049) + (!srcfileloc "cgraph.h" 2007) + nil ) + (!pair "cgraph_indirect_call_info" + (!type already_seen 816) + (!srcfileloc "cgraph.h" 1897) + nil ) + (!pair "gcall" + (!type already_seen 774) + (!srcfileloc "cgraph.h" 1894) + nil ) + (!pair "ipa_polymorphic_call_context" + (!type already_seen 817) + (!srcfileloc "cgraph.h" 1569) + nil ) + (!pair "varpool_node_set" + (!type already_seen 1046) + (!srcfileloc "cgraph.h" 1516) + nil ) + (!pair "cgraph_node_set" + (!type already_seen 1044) + (!srcfileloc "cgraph.h" 1515) + nil ) + (!pair "cgraph_node_set_def" + (!type already_seen 1043) + (!srcfileloc "cgraph.h" 1515) + nil ) + (!pair "vl_ptr" + (!type already_seen 826) + (!srcfileloc "cgraph.h" 1410) + nil ) + (!pair "va_heap" + (!type already_seen 827) + (!srcfileloc "cgraph.h" 1410) + nil ) + (!pair "vec" + (!type already_seen 825) + (!srcfileloc "cgraph.h" 1410) + nil ) + (!pair "cgraph_simd_clone" + (!type already_seen 822) + (!srcfileloc "cgraph.h" 1403) + nil ) + (!pair "cgraph_edge_hasher" + (!type already_seen 820) + (!srcfileloc "cgraph.h" 1397) + nil ) + (!pair "hash_table" + (!type already_seen 819) + (!srcfileloc "cgraph.h" 1397) + nil ) + (!pair "auto_vec" + (!type already_seen 1042) + (!srcfileloc "cgraph.h" 1153) + nil ) + (!pair "cgraph_edge" + (!type already_seen 814) + (!srcfileloc "cgraph.h" 1128) + nil ) + (!pair "cgraph_function_version_info" + (!type already_seen 1040) + (!srcfileloc "cgraph.h" 824) + nil ) + (!pair "cgraph_simd_clone_arg" + (!type already_seen 824) + (!srcfileloc "cgraph.h" 814) + nil ) + (!pair "cgraph_node" + (!type already_seen 811) + (!srcfileloc "cgraph.h" 808) + nil ) + (!pair "section_hash_entry" + (!type already_seen 342) + (!srcfileloc "cgraph.h" 637) + nil ) + (!pair "ipa_ref_list" + (!type already_seen 320) + (!srcfileloc "cgraph.h" 621) + nil ) + (!pair "symtab_node" + (!type already_seen 319) + (!srcfileloc "cgraph.h" 292) + nil ) + (!pair "ipa_ref" + (!type already_seen 1032) + (!srcfileloc "cgraph.h" 
170) + nil ) + (!pair "explicit" + (!type already_seen 1035) + (!srcfileloc "cgraph.h" 113) + nil ) + (!pair "ipa_opt_pass" + (!type already_seen 828) + (!srcfileloc "cgraph.h" 38) + nil ) + (!pair "ipa_opt_pass_d" + (!type already_seen 829) + (!srcfileloc "cgraph.h" 38) + nil ) + (!pair "ipa_ref_t" + (!type already_seen 1032) + (!srcfileloc "ipa-ref.h" 70) + nil ) + (!pair "__assert_gimple_bb_smaller_rtl_bb" + (!type array 2308 nil gc_unused "(int) sizeof (struct rtl_bb_info)\n - (int) sizeof (struct gimple_bb_info)" + (!type already_seen 2) + ) + (!srcfileloc "basic-block.h" 162) + nil ) + (!pair "vec" + (!type already_seen 271) + (!srcfileloc "basic-block.h" 119) + nil ) + (!pair "edge_def" + (!type already_seen 273) + (!srcfileloc "basic-block.h" 53) + nil ) + (!pair "cselib_expand_callback" + (!type already_seen 2) + (!srcfileloc "cselib.h" 88) + nil ) + (!pair "sreal" + (!type already_seen 1026) + (!srcfileloc "profile-count.h" 1209) + nil ) + (!pair "profile_probability" + (!type already_seen 1027) + (!srcfileloc "profile-count.h" 637) + nil ) + (!pair "profile_count" + (!type already_seen 300) + (!srcfileloc "cfg.h" 76) + nil ) + (!pair "vec" + (!type already_seen 380) + (!srcfileloc "cfg.h" 45) + nil ) + (!pair "loop_exit_hasher" + (!type already_seen 411) + (!srcfileloc "cfgloop.h" 331) + nil ) + (!pair "hash_table" + (!type already_seen 410) + (!srcfileloc "cfgloop.h" 331) + nil ) + (!pair "vec" + (!type already_seen 278) + (!srcfileloc "cfgloop.h" 142) + nil ) + (!pair "loop_p" + (!type already_seen 274) + (!srcfileloc "cfgloop.h" 95) + nil ) + (!pair "noswitch_section_callback" + (!type already_seen 2) + (!srcfileloc "output.h" 499) + nil ) + (!pair "unnamed_section_callback" + (!type already_seen 2) + (!srcfileloc "output.h" 473) + nil ) + (!pair "__gcc_host_wide_int__" + (!type already_seen 2) + (!srcfileloc "hwint.h" 77) + nil ) + (!pair "used_type_hasher" + (!type already_seen 1013) + (!srcfileloc "function.h" 491) + nil ) + (!pair "hash_table" + 
(!type already_seen 1014) + (!srcfileloc "function.h" 491) + nil ) + (!pair "range_query" + (!type already_seen 516) + (!srcfileloc "function.h" 310) + nil ) + (!pair "hash_set" + (!type already_seen 469) + (!srcfileloc "function.h" 299) + nil ) + (!pair "callinfo_dalloc" + (!type already_seen 419) + (!srcfileloc "function.h" 234) + nil ) + (!pair "vec" + (!type already_seen 418) + (!srcfileloc "function.h" 234) + nil ) + (!pair "callinfo_callee" + (!type already_seen 416) + (!srcfileloc "function.h" 230) + nil ) + (!pair "vec" + (!type already_seen 415) + (!srcfileloc "function.h" 230) + nil ) + (!pair "vec" + (!type already_seen 999) + (!srcfileloc "function.h" 147) + nil ) + (!pair "vec" + (!type already_seen 376) + (!srcfileloc "function.h" 145) + nil ) + (!pair "rtx_code_label" + (!type already_seen 366) + (!srcfileloc "function.h" 140) + nil ) + (!pair "call_site_record" + (!type already_seen 998) + (!srcfileloc "function.h" 134) + nil ) + (!pair "vec" + (!type already_seen 994) + (!srcfileloc "function.h" 131) + nil ) + (!pair "ht_cb" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/symtab.h" 89) + nil ) + (!pair "hashnode" + (!type already_seen 16) + (!srcfileloc "../libcpp/include/symtab.h" 41) + nil ) + (!pair "cpp_hash_table" + (!type already_seen 986) + (!srcfileloc "../libcpp/include/symtab.h" 40) + nil ) + (!pair "ht_identifier_ptr" + (!type already_seen 16) + (!srcfileloc "../libcpp/include/symtab.h" 30) + nil ) + (!pair "ht_identifier" + (!type already_seen 15) + (!srcfileloc "../libcpp/include/symtab.h" 29) + nil ) + (!pair "libfunc_hasher" + (!type already_seen 607) + (!srcfileloc "libfuncs.h" 61) + nil ) + (!pair "hash_table" + (!type already_seen 606) + (!srcfileloc "libfuncs.h" 61) + nil ) + (!pair "builtin_info_type" + (!type already_seen 981) + (!srcfileloc "tree-core.h" 2311) + nil ) + (!pair "alias_pair" + (!type already_seen 969) + (!srcfileloc "tree-core.h" 2275) + nil ) + (!pair "vec" + (!type already_seen 983) + (!srcfileloc 
"tree-core.h" 2275) + nil ) + (!pair "record_layout_info" + (!type already_seen 972) + (!srcfileloc "tree-core.h" 2181) + nil ) + (!pair "vec" + (!type already_seen 85) + (!srcfileloc "tree-core.h" 1653) + nil ) + (!pair "constructor_elt" + (!type already_seen 568) + (!srcfileloc "tree-core.h" 1521) + nil ) + (!pair "vec" + (!type already_seen 567) + (!srcfileloc "tree-core.h" 1521) + nil ) + (!pair "walk_tree_lh" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 997) + nil ) + (!pair "walk_tree_fn" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 993) + nil ) + (!pair "priority_type" + (!type already_seen 2) + (!srcfileloc "tree-core.h" 990) + nil ) + (!pair "poly_int_traits" + (!type already_seen 965) + (!srcfileloc "tree.h" 6293) + nil ) + (!pair "typename" + (!type already_seen 965) + (!srcfileloc "tree.h" 6293) + nil ) + (!pair "widest2_int_cst" + (!type already_seen 962) + (!srcfileloc "tree.h" 6043) + nil ) + (!pair "generic_wide_int>" + (!type already_seen 962) + (!srcfileloc "tree.h" 6042) + nil ) + (!pair "unextended" + (!type already_seen 961) + (!srcfileloc "tree.h" 6035) + nil ) + (!pair "generic_wide_int" + (!type already_seen 961) + (!srcfileloc "tree.h" 6035) + nil ) + (!pair "extended" + (!type already_seen 958) + (!srcfileloc "tree.h" 6028) + nil ) + (!pair "N" + (!type already_seen 960) + (!srcfileloc "tree.h" 6028) + nil ) + (!pair "extended_tree>" + (!type already_seen 958) + (!srcfileloc "tree.h" 6028) + nil ) + (!pair "tree_to_poly_wide_ref" + (!type already_seen 956) + (!srcfileloc "tree.h" 6019) + nil ) + (!pair "unextended_tree" + (!type already_seen 945) + (!srcfileloc "tree.h" 6018) + nil ) + (!pair "generic_wide_int>" + (!type already_seen 956) + (!srcfileloc "tree.h" 6018) + nil ) + (!pair "tree_to_poly_offset_ref" + (!type already_seen 954) + (!srcfileloc "tree.h" 6016) + nil ) + (!pair "generic_wide_int>" + (!type already_seen 954) + (!srcfileloc "tree.h" 6015) + nil ) + (!pair "tree_to_poly_widest_ref" + (!type already_seen 
952) + (!srcfileloc "tree.h" 6013) + nil ) + (!pair "generic_wide_int>" + (!type already_seen 952) + (!srcfileloc "tree.h" 6012) + nil ) + (!pair "tree_to_wide_ref" + (!type already_seen 951) + (!srcfileloc "tree.h" 6004) + nil ) + (!pair "generic_wide_int>" + (!type already_seen 951) + (!srcfileloc "tree.h" 6003) + nil ) + (!pair "tree_to_offset_ref" + (!type already_seen 950) + (!srcfileloc "tree.h" 6002) + nil ) + (!pair "generic_wide_int" + (!type already_seen 950) + (!srcfileloc "tree.h" 6002) + nil ) + (!pair "tree_to_widest_ref" + (!type already_seen 949) + (!srcfileloc "tree.h" 6001) + nil ) + (!pair "generic_wide_int" + (!type already_seen 949) + (!srcfileloc "tree.h" 6001) + nil ) + (!pair "offset_extended_tree" + (!type already_seen 948) + (!srcfileloc "tree.h" 5999) + nil ) + (!pair "extended_tree" + (!type already_seen 948) + (!srcfileloc "tree.h" 5999) + nil ) + (!pair "widest_extended_tree" + (!type already_seen 947) + (!srcfileloc "tree.h" 5998) + nil ) + (!pair "extended_tree" + (!type already_seen 947) + (!srcfileloc "tree.h" 5998) + nil ) + (!pair "decl_tree_map" + (!type already_seen 943) + (!srcfileloc "tree.h" 5670) + nil ) + (!pair "decl_tree_traits" + (!type already_seen 942) + (!srcfileloc "tree.h" 5670) + nil ) + (!pair "hash_map" + (!type already_seen 943) + (!srcfileloc "tree.h" 5670) + nil ) + (!pair "type_tree_cache_map" + (!type already_seen 940) + (!srcfileloc "tree.h" 5665) + nil ) + (!pair "type_tree_cache_traits" + (!type already_seen 939) + (!srcfileloc "tree.h" 5665) + nil ) + (!pair "hash_map" + (!type already_seen 940) + (!srcfileloc "tree.h" 5665) + nil ) + (!pair "decl_tree_cache_map" + (!type already_seen 937) + (!srcfileloc "tree.h" 5659) + nil ) + (!pair "decl_tree_cache_traits" + (!type already_seen 936) + (!srcfileloc "tree.h" 5659) + nil ) + (!pair "hash_map" + (!type already_seen 937) + (!srcfileloc "tree.h" 5659) + nil ) + (!pair "tree_cache_map" + (!type already_seen 935) + (!srcfileloc "tree.h" 5653) + nil ) + 
(!pair "tree_cache_traits" + (!type already_seen 934) + (!srcfileloc "tree.h" 5653) + nil ) + (!pair "hash_map" + (!type already_seen 935) + (!srcfileloc "tree.h" 5653) + nil ) + (!pair "hash_rtx_callback_function" + (!type already_seen 2) + (!srcfileloc "rtl.h" 3708) + nil ) + (!pair "rtx_equal_p_callback_function" + (!type already_seen 2) + (!srcfileloc "rtl.h" 3703) + nil ) + (!pair "for_each_inc_dec_fn" + (!type already_seen 2) + (!srcfileloc "rtl.h" 3699) + nil ) + (!pair "rtx_to_poly_wide_ref" + (!type already_seen 920) + (!srcfileloc "rtl.h" 2341) + nil ) + (!pair "generic_wide_int>>" + (!type already_seen 920) + (!srcfileloc "rtl.h" 2340) + nil ) + (!pair "rtx_mode_t" + (!type already_seen 919) + (!srcfileloc "rtl.h" 2252) + nil ) + (!pair "std::pair" + (!type already_seen 919) + (!srcfileloc "rtl.h" 2252) + nil ) + (!pair "rtx_insn" + (!type already_seen 297) + (!srcfileloc "rtl.h" 496) + nil ) + (!pair "rtx_insn_list" + (!type already_seen 745) + (!srcfileloc "rtl.h" 493) + nil ) + (!pair "rtx_expr_list" + (!type already_seen 743) + (!srcfileloc "rtl.h" 464) + nil ) + (!pair "NUM_POLY_INT_COEFFS" + (!type already_seen 908) + (!srcfileloc "rtl.h" 291) + nil ) + (!pair "trailing_wide_ints" + (!type already_seen 907) + (!srcfileloc "rtl.h" 291) + nil ) + (!pair "va_gc" + (!type already_seen 79) + (!srcfileloc "rtl.h" 267) + nil ) + (!pair "vec" + (!type already_seen 221) + (!srcfileloc "rtl.h" 267) + nil ) + (!pair "rtunion" + (!type already_seen 210) + (!srcfileloc "rtl.h" 237) + nil ) + (!pair "reg_attrs" + (!type already_seen 239) + (!srcfileloc "rtl.h" 229) + nil ) + (!pair "mem_attrs" + (!type already_seen 228) + (!srcfileloc "rtl.h" 153) + nil ) + (!pair "trailing_wide_ints" + (!type already_seen 892) + (!srcfileloc "wide-int.h" 1408) + nil ) + (!pair "trailing_wide_int" + (!type already_seen 891) + (!srcfileloc "wide-int.h" 1366) + nil ) + (!pair "trailing_wide_int_storage" + (!type already_seen 890) + (!srcfileloc "wide-int.h" 1366) + nil ) + (!pair 
"generic_wide_int" + (!type already_seen 891) + (!srcfileloc "wide-int.h" 1366) + nil ) + (!pair "fixed_wide_int_storage" + (!type already_seen 888) + (!srcfileloc "wide-int.h" 1221) + nil ) + (!pair "ASSIGNMENT_OPERATOR" + (!type already_seen 886) + (!srcfileloc "wide-int.h" 754) + nil ) + (!pair "generic_wide_int" + (!type already_seen 885) + (!srcfileloc "wide-int.h" 715) + nil ) + (!pair "T1" + (!type already_seen 882) + (!srcfileloc "wide-int.h" 459) + nil ) + (!pair "int_traitsprecision>>" + (!type already_seen 879) + (!srcfileloc "wide-int.h" 459) + nil ) + (!pair "signed_predicate_result" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 442) + nil ) + (!pair "signed_shift_result_type" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 441) + nil ) + (!pair "precision" + (!type already_seen 875) + (!srcfileloc "wide-int.h" 438) + nil ) + (!pair "T2" + (!type already_seen 878) + (!srcfileloc "wide-int.h" 438) + nil ) + (!pair "int_traitsprecision>>" + (!type already_seen 874) + (!srcfileloc "wide-int.h" 438) + nil ) + (!pair "predicate_result" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 429) + nil ) + (!pair "operator_result" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 428) + nil ) + (!pair "result_type" + (!type already_seen 2) + (!srcfileloc "wide-int.h" 420) + nil ) + (!pair "wide_int_ref" + (!type already_seen 870) + (!srcfileloc "wide-int.h" 334) + nil ) + (!pair "false" + (!type already_seen 872) + (!srcfileloc "wide-int.h" 334) + nil ) + (!pair "wide_int_ref_storage>" + (!type already_seen 870) + (!srcfileloc "wide-int.h" 334) + nil ) + (!pair "widest2_int" + (!type already_seen 866) + (!srcfileloc "wide-int.h" 327) + nil ) + (!pair "generic_wide_int>" + (!type already_seen 866) + (!srcfileloc "wide-int.h" 327) + nil ) + (!pair "WIDE_INT_MAX_PRECISION" + (!type already_seen 865) + (!srcfileloc "wide-int.h" 324) + nil ) + (!pair "ADDR_MAX_PRECISION" + (!type already_seen 865) + (!srcfileloc "wide-int.h" 323) + nil ) + (!pair 
"FIXED_WIDE_INT" + (!type already_seen 865) + (!srcfileloc "wide-int.h" 323) + nil ) + (!pair "wide_int" + (!type already_seen 497) + (!srcfileloc "wide-int.h" 322) + nil ) + (!pair "wide_int_storage" + (!type already_seen 498) + (!srcfileloc "wide-int.h" 322) + nil ) + (!pair "generic_wide_int" + (!type already_seen 497) + (!srcfileloc "wide-int.h" 322) + nil ) + (!pair "bitmap_obstack" + (!type already_seen 392) + (!srcfileloc "bitmap.h" 349) + nil ) + (!pair "bitmap_element" + (!type already_seen 389) + (!srcfileloc "bitmap.h" 345) + nil ) + (!pair "BITMAP_WORD" + (!type already_seen 2) + (!srcfileloc "bitmap.h" 276) + nil ) + (!pair "splay_tree" + (!type already_seen 860) + (!srcfileloc "../include/splay-tree.h" 127) + nil ) + (!pair "splay_tree_deallocate_fn" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 88) + nil ) + (!pair "splay_tree_allocate_fn" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 82) + nil ) + (!pair "splay_tree_foreach_fn" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 76) + nil ) + (!pair "splay_tree_delete_value_fn" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 73) + nil ) + (!pair "splay_tree_delete_key_fn" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 67) + nil ) + (!pair "splay_tree_compare_fn" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 58) + nil ) + (!pair "splay_tree_node" + (!type already_seen 858) + (!srcfileloc "../include/splay-tree.h" 54) + nil ) + (!pair "splay_tree_value" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 51) + nil ) + (!pair "splay_tree_key" + (!type already_seen 2) + (!srcfileloc "../include/splay-tree.h" 50) + nil ) + (!pair "htab_t" + (!type already_seen 332) + (!srcfileloc "../include/hashtab.h" 139) + nil ) + (!pair "htab_free_with_arg" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 78) + nil ) + (!pair "htab_alloc_with_arg" + (!type already_seen 2) + 
(!srcfileloc "../include/hashtab.h" 77) + nil ) + (!pair "htab_free" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 73) + nil ) + (!pair "htab_alloc" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 70) + nil ) + (!pair "htab_trav" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 64) + nil ) + (!pair "htab_del" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 58) + nil ) + (!pair "htab_eq" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 54) + nil ) + (!pair "htab_hash" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 47) + nil ) + (!pair "hashval_t" + (!type already_seen 2) + (!srcfileloc "../include/hashtab.h" 42) + nil ) + (!pair "target_unit" + (!type already_seen 856) + (!srcfileloc "defaults.h" 1455) + nil ) + (!pair "TARGET_UNIT" + (!type already_seen 856) + (!srcfileloc "defaults.h" 1455) + nil ) + (!pair "uchar" + (!type already_seen 8) + (!srcfileloc "coretypes.h" 453) + nil ) + (!pair "gt_pointer_operator" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 450) + nil ) + (!pair "complex_mode" + (!type already_seen 736) + (!srcfileloc "coretypes.h" 390) + nil ) + (!pair "string_int_pair" + (!type already_seen 845) + (!srcfileloc "coretypes.h" 363) + nil ) + (!pair "int" + (!type already_seen 373) + (!srcfileloc "coretypes.h" 363) + nil ) + (!pair "std::pair" + (!type already_seen 845) + (!srcfileloc "coretypes.h" 363) + nil ) + (!pair "tree_pair" + (!type already_seen 844) + (!srcfileloc "coretypes.h" 362) + nil ) + (!pair "std::pair" + (!type already_seen 844) + (!srcfileloc "coretypes.h" 362) + nil ) + (!pair "reg_class_t" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 354) + nil ) + (!pair "const_basic_block" + (!type already_seen 268) + (!srcfileloc "coretypes.h" 334) + nil ) + (!pair "basic_block" + (!type already_seen 268) + (!srcfileloc "coretypes.h" 333) + nil ) + (!pair "const_edge" + (!type already_seen 272) + (!srcfileloc "coretypes.h" 331) + nil ) + 
(!pair "edge" + (!type already_seen 272) + (!srcfileloc "coretypes.h" 330) + nil ) + (!pair "alias_set_type" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 327) + nil ) + (!pair "addr_space_t" + (!type already_seen 8) + (!srcfileloc "coretypes.h" 168) + nil ) + (!pair "diagnostic_input_charset_callback" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 157) + nil ) + (!pair "section" + (!type already_seen 214) + (!srcfileloc "coretypes.h" 147) + nil ) + (!pair "gimple_seq" + (!type already_seen 281) + (!srcfileloc "coretypes.h" 100) + nil ) + (!pair "gimple" + (!type already_seen 282) + (!srcfileloc "coretypes.h" 100) + nil ) + (!pair "const_tree" + (!type already_seen 23) + (!srcfileloc "coretypes.h" 98) + nil ) + (!pair "tree" + (!type already_seen 23) + (!srcfileloc "coretypes.h" 97) + nil ) + (!pair "const_hwivec" + (!type already_seen 757) + (!srcfileloc "coretypes.h" 95) + nil ) + (!pair "hwivec" + (!type already_seen 757) + (!srcfileloc "coretypes.h" 94) + nil ) + (!pair "const_rtvec" + (!type already_seen 230) + (!srcfileloc "coretypes.h" 92) + nil ) + (!pair "rtvec" + (!type already_seen 230) + (!srcfileloc "coretypes.h" 91) + nil ) + (!pair "fixed_size_mode_pod" + (!type already_seen 742) + (!srcfileloc "coretypes.h" 71) + nil ) + (!pair "pod_mode" + (!type already_seen 742) + (!srcfileloc "coretypes.h" 71) + nil ) + (!pair "scalar_int_mode_pod" + (!type already_seen 741) + (!srcfileloc "coretypes.h" 70) + nil ) + (!pair "pod_mode" + (!type already_seen 741) + (!srcfileloc "coretypes.h" 70) + nil ) + (!pair "scalar_mode_pod" + (!type already_seen 44) + (!srcfileloc "coretypes.h" 69) + nil ) + (!pair "pod_mode" + (!type already_seen 44) + (!srcfileloc "coretypes.h" 69) + nil ) + (!pair "opt_scalar_float_mode" + (!type already_seen 740) + (!srcfileloc "coretypes.h" 67) + nil ) + (!pair "scalar_float_mode" + (!type already_seen 735) + (!srcfileloc "coretypes.h" 67) + nil ) + (!pair "opt_mode" + (!type already_seen 740) + (!srcfileloc "coretypes.h" 
67) + nil ) + (!pair "opt_scalar_int_mode" + (!type already_seen 739) + (!srcfileloc "coretypes.h" 66) + nil ) + (!pair "scalar_int_mode" + (!type already_seen 289) + (!srcfileloc "coretypes.h" 66) + nil ) + (!pair "opt_mode" + (!type already_seen 739) + (!srcfileloc "coretypes.h" 66) + nil ) + (!pair "opt_scalar_mode" + (!type already_seen 738) + (!srcfileloc "coretypes.h" 65) + nil ) + (!pair "scalar_mode" + (!type already_seen 45) + (!srcfileloc "coretypes.h" 65) + nil ) + (!pair "opt_mode" + (!type already_seen 738) + (!srcfileloc "coretypes.h" 65) + nil ) + (!pair "const_rtx" + (!type already_seen 99) + (!srcfileloc "coretypes.h" 58) + nil ) + (!pair "rtx" + (!type already_seen 99) + (!srcfileloc "coretypes.h" 57) + nil ) + (!pair "const_sbitmap" + (!type already_seen 734) + (!srcfileloc "coretypes.h" 55) + nil ) + (!pair "sbitmap" + (!type already_seen 734) + (!srcfileloc "coretypes.h" 54) + nil ) + (!pair "const_bitmap" + (!type already_seen 386) + (!srcfileloc "coretypes.h" 52) + nil ) + (!pair "bitmap" + (!type already_seen 386) + (!srcfileloc "coretypes.h" 51) + nil ) + (!pair "gcov_type_unsigned" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 47) + nil ) + (!pair "gcov_type" + (!type already_seen 2) + (!srcfileloc "coretypes.h" 46) + nil ) + (!pair "location_hash" + (!type already_seen 732) + (!srcfileloc "input.h" 280) + nil ) + (!pair "hash_map" + (!type already_seen 731) + (!srcfileloc "input.h" 280) + nil ) + (!pair "string_concat_db" + (!type already_seen 728) + (!srcfileloc "input.h" 261) + nil ) + (!pair "string_concat" + (!type already_seen 726) + (!srcfileloc "input.h" 252) + nil ) + (!pair "cpp_cb" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 1391) + nil ) + (!pair "cpp_comment_table" + (!type already_seen 717) + (!srcfileloc "../libcpp/include/cpplib.h" 1378) + nil ) + (!pair "cpp_comment" + (!type already_seen 716) + (!srcfileloc "../libcpp/include/cpplib.h" 1364) + nil ) + (!pair "cpp_num" + (!type already_seen 
715) + (!srcfileloc "../libcpp/include/cpplib.h" 1201) + nil ) + (!pair "cpp_num_part" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 1200) + nil ) + (!pair "missing_header_cb" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 668) + nil ) + (!pair "cppchar_signed_t" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 295) + nil ) + (!pair "cppchar_t" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/cpplib.h" 294) + nil ) + (!pair "cpp_dir" + (!type already_seen 711) + (!srcfileloc "../libcpp/include/cpplib.h" 37) + nil ) + (!pair "cpp_callbacks" + (!type already_seen 710) + (!srcfileloc "../libcpp/include/cpplib.h" 36) + nil ) + (!pair "cpp_macro" + (!type already_seen 20) + (!srcfileloc "../libcpp/include/cpplib.h" 35) + nil ) + (!pair "cpp_hashnode" + (!type already_seen 12) + (!srcfileloc "../libcpp/include/cpplib.h" 34) + nil ) + (!pair "cpp_string" + (!type already_seen 687) + (!srcfileloc "../libcpp/include/cpplib.h" 33) + nil ) + (!pair "cpp_token" + (!type already_seen 683) + (!srcfileloc "../libcpp/include/cpplib.h" 32) + nil ) + (!pair "cpp_options" + (!type already_seen 709) + (!srcfileloc "../libcpp/include/cpplib.h" 31) + nil ) + (!pair "cpp_buffer" + (!type already_seen 708) + (!srcfileloc "../libcpp/include/cpplib.h" 30) + nil ) + (!pair "cpp_reader" + (!type already_seen 707) + (!srcfileloc "../libcpp/include/cpplib.h" 29) + nil ) + (!pair "expanded_location" + (!type already_seen 698) + (!srcfileloc "../libcpp/include/line-map.h" 1313) + nil ) + (!pair "maps_info_macro" + (!type already_seen 692) + (!srcfileloc "../libcpp/include/line-map.h" 787) + nil ) + (!pair "maps_info_ordinary" + (!type already_seen 691) + (!srcfileloc "../libcpp/include/line-map.h" 785) + nil ) + (!pair "source_range" + (!type already_seen 1) + (!srcfileloc "../libcpp/include/line-map.h" 756) + nil ) + (!pair "line_map_macro" + (!type already_seen 689) + (!srcfileloc "../libcpp/include/line-map.h" 740) + 
nil ) + (!pair "line_map_ordinary" + (!type already_seen 6) + (!srcfileloc "../libcpp/include/line-map.h" 725) + nil ) + (!pair "line_map_round_alloc_size_func" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 354) + nil ) + (!pair "line_map_realloc" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 350) + nil ) + (!pair "location_t" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 292) + nil ) + (!pair "linenum_arith_t" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 53) + nil ) + (!pair "linenum_type" + (!type already_seen 2) + (!srcfileloc "../libcpp/include/line-map.h" 50) + nil ) + (!pair "PTR" + (!type already_seen 3) + (!srcfileloc "gengtype.cc" 5263) + nil ) + (!pair "CONSTEXPR" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5262) + nil ) + (!pair "fixed_size_mode" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5261) + nil ) + (!pair "machine_mode" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5260) + nil ) + (!pair "void" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5259) + nil ) + (!pair "JCF_u2" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5258) + nil ) + (!pair "jword" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5257) + nil ) + (!pair "uintptr_t" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5256) + nil ) + (!pair "uint8" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5255) + nil ) + (!pair "uint32_t" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5254) + nil ) + (!pair "uint64_t" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5253) + nil ) + (!pair "poly_uint64" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5252) + nil ) + (!pair "poly_int64" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5251) + nil ) + (!pair "int64_t" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5250) + nil ) + (!pair "widest_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5249) + nil 
) + (!pair "offset_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5248) + nil ) + (!pair "poly_int64_pod" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5247) + nil ) + (!pair "double_int" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5246) + nil ) + (!pair "FIXED_VALUE_TYPE" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5245) + nil ) + (!pair "REAL_VALUE_TYPE" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5244) + nil ) + (!pair "CUMULATIVE_ARGS" + (!type already_seen 2) + (!srcfileloc "gengtype.cc" 5243) + nil ) +) +(!variables 754 + (!pair "objc_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3838) + nil ) + (!pair "next_v2_EHTYPE_id_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3628) + nil ) + (!pair "next_v2_ehvtable_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3627) + nil ) + (!pair "objc_v2_ehtype_template" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3626) + nil ) + (!pair "ehtype_list" + (!type already_seen 2296) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3285) + nil ) + (!pair "ivar_offset_refs" + (!type already_seen 2305) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2995) + nil ) + (!pair "protlist" + (!type already_seen 2302) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2276) + nil ) + (!pair "nonlazy_category_list" + (!type already_seen 84) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2188) + nil ) + (!pair "category_list" + (!type already_seen 84) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2177) + nil ) + (!pair "nonlazy_class_list" + (!type already_seen 84) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2166) + nil ) + (!pair "class_list" + (!type already_seen 84) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2158) + nil ) + (!pair "metaclass_super_refs" + (!type already_seen 2296) + (!srcfileloc 
"objc/objc-next-runtime-abi-02.cc" 1484) + nil ) + (!pair "class_super_refs" + (!type already_seen 2296) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1483) + nil ) + (!pair "protrefs" + (!type already_seen 2302) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1312) + nil ) + (!pair "msgrefs" + (!type already_seen 2299) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1258) + nil ) + (!pair "classrefs" + (!type already_seen 2296) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1091) + nil ) + (!pair "extern_names" + (!type already_seen 2283) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 235) + (!options + (!option length string "SIZEHASHTABLE") + ) + ) + (!pair "objc_v2_global_trees" + (!type array 2309 nil gc_used "OCTI_V2_MAX" + (!type already_seen 23) + ) + (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 185) + nil ) + (!pair "objc_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 2788) + nil ) + (!pair "V1_Property_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1518) + nil ) + (!pair "V1_ProtocolExt_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1517) + nil ) + (!pair "V1_Protocol_OPT_CLS_METHODS_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1516) + nil ) + (!pair "V1_Protocol_OPT_NST_METHODS_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1515) + nil ) + (!pair "objc_class_ext_template" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1110) + nil ) + (!pair "objc_protocol_extension_template" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1076) + nil ) + (!pair "objc_v1_property_template" + (!type already_seen 23) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1046) + nil ) + (!pair "class_reference_idx" + (!type already_seen 2) + (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 678) + nil ) + (!pair 
"objc_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 2129) + nil ) + (!pair "num_static_inst" + (!type already_seen 2) + (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 870) + nil ) + (!pair "meta_base" + (!type already_seen 23) + (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 193) + nil ) + (!pair "objc_meta" + (!type already_seen 23) + (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 192) + nil ) + (!pair "property_name_attr_idx" + (!type already_seen 2) + (!srcfileloc "objc/objc-runtime-shared-support.cc" 290) + nil ) + (!pair "meth_var_types_idx" + (!type already_seen 2) + (!srcfileloc "objc/objc-runtime-shared-support.cc" 289) + nil ) + (!pair "meth_var_names_idx" + (!type already_seen 2) + (!srcfileloc "objc/objc-runtime-shared-support.cc" 288) + nil ) + (!pair "objc_rt_trees" + (!type array 2310 nil gc_used "OCTI_RT_META_MAX" + (!type already_seen 23) + ) + (!srcfileloc "objc/objc-runtime-shared-support.cc" 51) + nil ) + (!pair "objc_parmlist" + (!type already_seen 23) + (!srcfileloc "objc/objc-act.cc" 8436) + nil ) + (!pair "interface_map" + (!type already_seen 2280) + (!srcfileloc "objc/objc-act.cc" 3942) + nil ) + (!pair "string_layout_checked" + (!type already_seen 2) + (!srcfileloc "objc/objc-act.cc" 3228) + nil ) + (!pair "string_htab" + (!type already_seen 2292) + (!srcfileloc "objc/objc-act.cc" 260) + nil ) + (!pair "alias_name_map" + (!type already_seen 2280) + (!srcfileloc "objc/objc-act.cc" 164) + nil ) + (!pair "class_name_map" + (!type already_seen 2280) + (!srcfileloc "objc/objc-act.cc" 163) + nil ) + (!pair "class_method_map" + (!type already_seen 2280) + (!srcfileloc "objc/objc-act.cc" 159) + nil ) + (!pair "instance_method_map" + (!type already_seen 2280) + (!srcfileloc "objc/objc-act.cc" 158) + nil ) + (!pair "objc_global_trees" + (!type array 2311 nil gc_used "OCTI_MAX" + (!type already_seen 23) + ) + (!srcfileloc "objc/objc-act.h" 437) + nil ) + (!pair "objc_ivar_visibility" + (!type 
already_seen 2) + (!srcfileloc "objc/objc-act.h" 308) + nil ) + (!pair "cat_count" + (!type already_seen 2) + (!srcfileloc "objc/objc-act.h" 306) + nil ) + (!pair "imp_count" + (!type already_seen 2) + (!srcfileloc "objc/objc-act.h" 305) + nil ) + (!pair "imp_list" + (!type already_seen 2287) + (!srcfileloc "objc/objc-act.h" 304) + nil ) + (!pair "local_variables_to_volatilize" + (!type already_seen 84) + (!srcfileloc "objc/objc-act.h" 291) + nil ) + (!pair "real_file_decl_data" + (!type already_seen 322) + (!srcfileloc "lto/lto-common.cc" 2698) + (!options + (!option length string "real_file_count + 1") + ) + ) + (!pair "all_file_decl_data" + (!type already_seen 322) + (!srcfileloc "lto/lto-common.cc" 2665) + (!options + (!option length string "lto_stats.num_input_files + 1") + ) + ) + (!pair "types_to_register" + (!type already_seen 84) + (!srcfileloc "lto/lto-common.cc" 228) + nil ) + (!pair "tree_with_vars" + (!type already_seen 84) + (!srcfileloc "lto/lto-common.h" 28) + nil ) + (!pair "registered_builtin_types" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 790) + nil ) + (!pair "signed_size_type_node" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 244) + nil ) + (!pair "uintmax_type_node" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 243) + nil ) + (!pair "intmax_type_node" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 242) + nil ) + (!pair "wint_type_node" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 241) + nil ) + (!pair "const_string_type_node" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 240) + nil ) + (!pair "string_type_node" + (!type already_seen 23) + (!srcfileloc "lto/lto-lang.cc" 239) + nil ) + (!pair "builtin_types" + (!type array 2312 nil gc_used "(int) BT_LAST + 1" + (!type already_seen 23) + ) + (!srcfileloc "lto/lto-lang.cc" 237) + nil ) + (!pair "built_in_attributes" + (!type array 2313 nil gc_used "(int) ATTR_LAST" + (!type already_seen 23) + ) + (!srcfileloc 
"lto/lto-lang.cc" 172) + nil ) + (!pair "go_non_zero_struct" + (!type already_seen 23) + (!srcfileloc "go/go-c.h" 81) + nil ) + (!pair "go_gc_root" + (!type already_seen 23) + (!srcfileloc "go/go-lang.cc" 587) + nil ) + (!pair "gfc_rank_cst" + (!type array 2314 nil gc_used "GFC_MAX_DIMENSIONS + 1" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-const.h" 65) + nil ) + (!pair "gfor_fndecl_caf_random_init" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 970) + nil ) + (!pair "gfor_fndecl_random_init" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 969) + nil ) + (!pair "gfor_fndecl_ieee_procedure_exit" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 966) + nil ) + (!pair "gfor_fndecl_ieee_procedure_entry" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 965) + nil ) + (!pair "gfor_fndecl_sr_kind" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 962) + nil ) + (!pair "gfor_fndecl_si_kind" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 961) + nil ) + (!pair "gfor_fndecl_sc_kind" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 960) + nil ) + (!pair "gfor_fndecl_is_contiguous0" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 957) + nil ) + (!pair "gfor_fndecl_kill_sub" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 956) + nil ) + (!pair "gfor_fndecl_kill" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 955) + nil ) + (!pair "gfor_fndecl_iargc" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 954) + nil ) + (!pair "gfor_fndecl_convert_char4_to_char1" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 951) + nil ) + (!pair "gfor_fndecl_convert_char1_to_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 950) + nil ) + (!pair "gfor_fndecl_select_string_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 947) + nil ) + (!pair "gfor_fndecl_adjustr_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 
946) + nil ) + (!pair "gfor_fndecl_adjustl_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 945) + nil ) + (!pair "gfor_fndecl_string_minmax_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 944) + nil ) + (!pair "gfor_fndecl_string_trim_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 943) + nil ) + (!pair "gfor_fndecl_string_verify_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 942) + nil ) + (!pair "gfor_fndecl_string_scan_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 941) + nil ) + (!pair "gfor_fndecl_string_index_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 940) + nil ) + (!pair "gfor_fndecl_string_len_trim_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 939) + nil ) + (!pair "gfor_fndecl_concat_string_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 938) + nil ) + (!pair "gfor_fndecl_compare_string_char4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 937) + nil ) + (!pair "gfor_fndecl_select_string" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 936) + nil ) + (!pair "gfor_fndecl_adjustr" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 935) + nil ) + (!pair "gfor_fndecl_adjustl" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 934) + nil ) + (!pair "gfor_fndecl_string_minmax" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 933) + nil ) + (!pair "gfor_fndecl_string_trim" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 932) + nil ) + (!pair "gfor_fndecl_string_verify" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 931) + nil ) + (!pair "gfor_fndecl_string_scan" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 930) + nil ) + (!pair "gfor_fndecl_string_index" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 929) + nil ) + (!pair "gfor_fndecl_string_len_trim" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 928) + 
nil ) + (!pair "gfor_fndecl_concat_string" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 927) + nil ) + (!pair "gfor_fndecl_compare_string" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 926) + nil ) + (!pair "gfor_fndecl_zgemm" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 923) + nil ) + (!pair "gfor_fndecl_cgemm" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 922) + nil ) + (!pair "gfor_fndecl_dgemm" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 921) + nil ) + (!pair "gfor_fndecl_sgemm" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 920) + nil ) + (!pair "gfor_fndecl_math_ishftc16" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 917) + nil ) + (!pair "gfor_fndecl_math_ishftc8" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 916) + nil ) + (!pair "gfor_fndecl_math_ishftc4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 915) + nil ) + (!pair "gfor_fndecl_math_powi" + (!type array 2315 nil gc_used "4" + (!type array 2316 nil gc_used "3" + (!type already_seen 2260) + ) + ) + (!srcfileloc "fortran/trans.h" 914) + nil ) + (!pair "gfor_fndecl_caf_is_present" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 902) + nil ) + (!pair "gfor_fndecl_co_sum" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 901) + nil ) + (!pair "gfor_fndecl_co_reduce" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 900) + nil ) + (!pair "gfor_fndecl_co_min" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 899) + nil ) + (!pair "gfor_fndecl_co_max" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 898) + nil ) + (!pair "gfor_fndecl_co_broadcast" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 897) + nil ) + (!pair "gfor_fndecl_caf_team_number" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 896) + nil ) + (!pair "gfor_fndecl_caf_sync_team" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 895) + nil ) + (!pair 
"gfor_fndecl_caf_get_team" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 894) + nil ) + (!pair "gfor_fndecl_caf_end_team" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 893) + nil ) + (!pair "gfor_fndecl_caf_change_team" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 892) + nil ) + (!pair "gfor_fndecl_caf_form_team" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 891) + nil ) + (!pair "gfor_fndecl_caf_stopped_images" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 890) + nil ) + (!pair "gfor_fndecl_caf_image_status" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 889) + nil ) + (!pair "gfor_fndecl_caf_failed_images" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 888) + nil ) + (!pair "gfor_fndecl_caf_fail_image" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 887) + nil ) + (!pair "gfor_fndecl_caf_event_query" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 886) + nil ) + (!pair "gfor_fndecl_caf_event_wait" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 885) + nil ) + (!pair "gfor_fndecl_caf_event_post" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 884) + nil ) + (!pair "gfor_fndecl_caf_unlock" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 883) + nil ) + (!pair "gfor_fndecl_caf_lock" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 882) + nil ) + (!pair "gfor_fndecl_caf_atomic_op" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 881) + nil ) + (!pair "gfor_fndecl_caf_atomic_cas" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 880) + nil ) + (!pair "gfor_fndecl_caf_atomic_ref" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 879) + nil ) + (!pair "gfor_fndecl_caf_atomic_def" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 878) + nil ) + (!pair "gfor_fndecl_caf_error_stop_str" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 877) + nil ) + (!pair 
"gfor_fndecl_caf_error_stop" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 876) + nil ) + (!pair "gfor_fndecl_caf_stop_str" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 875) + nil ) + (!pair "gfor_fndecl_caf_stop_numeric" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 874) + nil ) + (!pair "gfor_fndecl_caf_sync_images" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 873) + nil ) + (!pair "gfor_fndecl_caf_sync_memory" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 872) + nil ) + (!pair "gfor_fndecl_caf_sync_all" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 871) + nil ) + (!pair "gfor_fndecl_caf_sendget_by_ref" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 870) + nil ) + (!pair "gfor_fndecl_caf_send_by_ref" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 869) + nil ) + (!pair "gfor_fndecl_caf_get_by_ref" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 868) + nil ) + (!pair "gfor_fndecl_caf_sendget" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 867) + nil ) + (!pair "gfor_fndecl_caf_send" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 866) + nil ) + (!pair "gfor_fndecl_caf_get" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 865) + nil ) + (!pair "gfor_fndecl_caf_deregister" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 864) + nil ) + (!pair "gfor_fndecl_caf_register" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 863) + nil ) + (!pair "gfor_fndecl_caf_num_images" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 862) + nil ) + (!pair "gfor_fndecl_caf_this_image" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 861) + nil ) + (!pair "gfor_fndecl_caf_finalize" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 860) + nil ) + (!pair "gfor_fndecl_caf_init" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 859) + nil ) + (!pair "gfor_fndecl_system_clock8" + (!type 
already_seen 23) + (!srcfileloc "fortran/trans.h" 855) + nil ) + (!pair "gfor_fndecl_system_clock4" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 854) + nil ) + (!pair "gfor_fndecl_associated" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 853) + nil ) + (!pair "gfor_fndecl_in_unpack" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 852) + nil ) + (!pair "gfor_fndecl_in_pack" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 851) + nil ) + (!pair "gfor_fndecl_fdate" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 850) + nil ) + (!pair "gfor_fndecl_ctime" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 849) + nil ) + (!pair "gfor_fndecl_ttynam" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 848) + nil ) + (!pair "gfor_fndecl_set_options" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 847) + nil ) + (!pair "gfor_fndecl_set_fpe" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 846) + nil ) + (!pair "gfor_fndecl_generate_error" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 845) + nil ) + (!pair "gfor_fndecl_os_error_at" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 844) + nil ) + (!pair "gfor_fndecl_runtime_warning_at" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 843) + nil ) + (!pair "gfor_fndecl_runtime_error_at" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 842) + nil ) + (!pair "gfor_fndecl_runtime_error" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 841) + nil ) + (!pair "gfor_fndecl_error_stop_string" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 840) + nil ) + (!pair "gfor_fndecl_error_stop_numeric" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 839) + nil ) + (!pair "gfor_fndecl_stop_string" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 838) + nil ) + (!pair "gfor_fndecl_stop_numeric" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 837) + nil ) + 
(!pair "gfor_fndecl_pause_string" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 836) + nil ) + (!pair "gfor_fndecl_pause_numeric" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 835) + nil ) + (!pair "gfc_static_ctors" + (!type already_seen 23) + (!srcfileloc "fortran/trans.h" 717) + nil ) + (!pair "gfc_charlen_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 53) + nil ) + (!pair "logical_false_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 49) + nil ) + (!pair "logical_true_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 48) + nil ) + (!pair "logical_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 47) + nil ) + (!pair "gfc_complex_float128_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 35) + nil ) + (!pair "gfc_float128_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 34) + nil ) + (!pair "pchar_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 33) + nil ) + (!pair "prvoid_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 32) + nil ) + (!pair "pvoid_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 31) + nil ) + (!pair "ppvoid_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 30) + nil ) + (!pair "gfc_character1_type_node" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 29) + nil ) + (!pair "gfc_array_range_type" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 28) + nil ) + (!pair "gfc_array_index_type" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.h" 27) + nil ) + (!pair "gfc_pcharacter_types" + (!type array 2317 nil gc_used "MAX_CHARACTER_KINDS + 1" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 99) + nil ) + (!pair "gfc_character_types" + (!type array 2318 nil gc_used "MAX_CHARACTER_KINDS + 1" + (!type 
already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 98) + nil ) + (!pair "gfc_complex_types" + (!type array 2319 nil gc_used "MAX_REAL_KINDS + 1" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 94) + nil ) + (!pair "gfc_real_types" + (!type array 2320 nil gc_used "MAX_REAL_KINDS + 1" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 93) + nil ) + (!pair "gfc_logical_types" + (!type array 2321 nil gc_used "MAX_INT_KINDS + 1" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 89) + nil ) + (!pair "gfc_integer_types" + (!type array 2322 nil gc_used "MAX_INT_KINDS + 1" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 88) + nil ) + (!pair "gfc_cfi_descriptor_base" + (!type array 2323 nil gc_used "2 * (CFI_MAX_RANK + 2)" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 80) + nil ) + (!pair "gfc_array_descriptor_base_caf" + (!type array 2324 nil gc_used "2 * (GFC_MAX_DIMENSIONS+1)" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 79) + nil ) + (!pair "gfc_array_descriptor_base" + (!type array 2325 nil gc_used "2 * (GFC_MAX_DIMENSIONS+1)" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-types.cc" 78) + nil ) + (!pair "gfc_max_array_element_size" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.cc" 77) + nil ) + (!pair "gfc_desc_dim_type" + (!type already_seen 23) + (!srcfileloc "fortran/trans-types.cc" 76) + nil ) + (!pair "select_struct" + (!type array 2326 nil gc_used "2" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-stmt.cc" 3251) + nil ) + (!pair "dt_parm" + (!type already_seen 23) + (!srcfileloc "fortran/trans-io.cc" 162) + nil ) + (!pair "iocall" + (!type array 2327 nil gc_used "IOCALL_NUM" + (!type already_seen 23) + ) + (!srcfileloc "fortran/trans-io.cc" 153) + nil ) + (!pair "st_parameter_field" + (!type array 2328 nil gc_used "" + (!type already_seen 2246) + ) + (!srcfileloc 
"fortran/trans-io.cc" 101) + nil ) + (!pair "st_parameter" + (!type array 2329 nil gc_used "" + (!type already_seen 2247) + ) + (!srcfileloc "fortran/trans-io.cc" 90) + nil ) + (!pair "gfc_intrinsic_map" + (!type array 2330 nil gc_used "" + (!type already_seen 2245) + ) + (!srcfileloc "fortran/trans-intrinsic.cc" 116) + nil ) + (!pair "module_htab" + (!type already_seen 2240) + (!srcfileloc "fortran/trans-decl.cc" 5066) + nil ) + (!pair "saved_local_decls" + (!type already_seen 23) + (!srcfileloc "fortran/trans-decl.cc" 68) + nil ) + (!pair "saved_parent_function_decls" + (!type already_seen 23) + (!srcfileloc "fortran/trans-decl.cc" 64) + nil ) + (!pair "saved_function_decls" + (!type already_seen 23) + (!srcfileloc "fortran/trans-decl.cc" 63) + nil ) + (!pair "parent_fake_result_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-decl.cc" 58) + nil ) + (!pair "current_fake_result_decl" + (!type already_seen 23) + (!srcfileloc "fortran/trans-decl.cc" 57) + nil ) + (!pair "global_binding_level" + (!type already_seen 435) + (!srcfileloc "fortran/f95-lang.cc" 317) + nil ) + (!pair "current_binding_level" + (!type already_seen 435) + (!srcfileloc "fortran/f95-lang.cc" 313) + nil ) + (!pair "current_translation_unit" + (!type already_seen 23) + (!srcfileloc "fortran/f95-lang.cc" 191) + nil ) + (!pair "free_binding_level" + (!type already_seen 435) + (!srcfileloc "fortran/f95-lang.cc" 185) + nil ) + (!pair "tinfo_types" + (!type array 2331 nil gc_used "TK_END" + (!type already_seen 23) + ) + (!srcfileloc "d/typeinfo.cc" 95) + nil ) + (!pair "d_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "d/d-lang.cc" 1900) + nil ) + (!pair "d_keep_list" + (!type already_seen 23) + (!srcfileloc "d/d-lang.cc" 1889) + nil ) + (!pair "global_declarations" + (!type already_seen 84) + (!srcfileloc "d/d-lang.cc" 95) + nil ) + (!pair "global_context" + (!type already_seen 23) + (!srcfileloc "d/d-lang.cc" 92) + nil ) + (!pair "builtin_types" + (!type array 2332 nil 
gc_used "(int) BT_LAST + 1" + (!type already_seen 23) + ) + (!srcfileloc "d/d-builtins.cc" 1027) + nil ) + (!pair "built_in_attributes" + (!type array 2333 nil gc_used "(int) ATTR_LAST" + (!type already_seen 23) + ) + (!srcfileloc "d/d-builtins.cc" 931) + nil ) + (!pair "signed_size_type_node" + (!type already_seen 23) + (!srcfileloc "d/d-builtins.cc" 779) + nil ) + (!pair "uintmax_type_node" + (!type already_seen 23) + (!srcfileloc "d/d-builtins.cc" 778) + nil ) + (!pair "intmax_type_node" + (!type already_seen 23) + (!srcfileloc "d/d-builtins.cc" 777) + nil ) + (!pair "wint_type_node" + (!type already_seen 23) + (!srcfileloc "d/d-builtins.cc" 776) + nil ) + (!pair "const_string_type_node" + (!type already_seen 23) + (!srcfileloc "d/d-builtins.cc" 775) + nil ) + (!pair "string_type_node" + (!type already_seen 23) + (!srcfileloc "d/d-builtins.cc" 774) + nil ) + (!pair "gcc_builtins_types" + (!type already_seen 84) + (!srcfileloc "d/d-builtins.cc" 48) + nil ) + (!pair "gcc_builtins_libfuncs" + (!type already_seen 84) + (!srcfileloc "d/d-builtins.cc" 47) + nil ) + (!pair "gcc_builtins_functions" + (!type already_seen 84) + (!srcfileloc "d/d-builtins.cc" 46) + nil ) + (!pair "d_global_trees" + (!type array 2334 nil gc_used "DTI_MAX" + (!type already_seen 23) + ) + (!srcfileloc "d/d-tree.h" 432) + nil ) + (!pair "global_binding_level" + (!type already_seen 435) + (!srcfileloc "d/d-tree.h" 128) + nil ) + (!pair "current_binding_level" + (!type already_seen 435) + (!srcfileloc "d/d-tree.h" 127) + nil ) + (!pair "vlt_register_set_fndecl" + (!type already_seen 23) + (!srcfileloc "cp/vtable-class-hierarchy.cc" 133) + nil ) + (!pair "vlt_register_pairs_fndecl" + (!type already_seen 23) + (!srcfileloc "cp/vtable-class-hierarchy.cc" 132) + nil ) + (!pair "vlt_saved_class_info" + (!type already_seen 84) + (!srcfileloc "cp/vtable-class-hierarchy.cc" 131) + nil ) + (!pair "deleted_copy_types" + (!type already_seen 468) + (!srcfileloc "cp/tree.cc" 4416) + nil ) + (!pair 
"list_hash_table" + (!type already_seen 2211) + (!srcfileloc "cp/tree.cc" 2166) + nil ) + (!pair "cplus_array_htab" + (!type already_seen 2207) + (!srcfileloc "cp/tree.cc" 1034) + nil ) + (!pair "deferred_access_no_check" + (!type already_seen 2) + (!srcfileloc "cp/semantics.cc" 138) + nil ) + (!pair "deferred_access_stack" + (!type already_seen 2201) + (!srcfileloc "cp/semantics.cc" 137) + nil ) + (!pair "tinfo_descs" + (!type already_seen 2198) + (!srcfileloc "cp/rtti.cc" 122) + nil ) + (!pair "dguide_cache" + (!type already_seen 2195) + (!srcfileloc "cp/pt.cc" 29962) + (!options + (!option deletable string "") + ) + ) + (!pair "explicit_specifier_map" + (!type already_seen 938) + (!srcfileloc "cp/pt.cc" 13986) + (!options + (!option cache string "") + ) + ) + (!pair "defarg_inst" + (!type already_seen 1557) + (!srcfileloc "cp/pt.cc" 13849) + (!options + (!option cache string "") + ) + ) + (!pair "last_error_tinst_level" + (!type already_seen 1972) + (!srcfileloc "cp/pt.cc" 11068) + nil ) + (!pair "pending_template_freelist_head" + (!type already_seen 2182) + (!srcfileloc "cp/pt.cc" 9599) + (!options + (!option deletable string "") + ) + ) + (!pair "tinst_level_freelist_head" + (!type already_seen 1972) + (!srcfileloc "cp/pt.cc" 9590) + (!options + (!option deletable string "") + ) + ) + (!pair "tree_list_freelist_head" + (!type already_seen 23) + (!srcfileloc "cp/pt.cc" 9581) + (!options + (!option deletable string "") + ) + ) + (!pair "defaulted_ttp_cache" + (!type already_seen 393) + (!srcfileloc "cp/pt.cc" 7864) + (!options + (!option deletable string "") + ) + ) + (!pair "tparm_obj_values" + (!type already_seen 393) + (!srcfileloc "cp/pt.cc" 7205) + nil ) + (!pair "canonical_template_parms" + (!type already_seen 84) + (!srcfileloc "cp/pt.cc" 122) + nil ) + (!pair "type_specializations" + (!type already_seen 2185) + (!srcfileloc "cp/pt.cc" 116) + nil ) + (!pair "decl_specializations" + (!type already_seen 2185) + (!srcfileloc "cp/pt.cc" 115) + nil ) + (!pair 
"saved_access_scope" + (!type already_seen 84) + (!srcfileloc "cp/pt.cc" 72) + nil ) + (!pair "current_tinst_level" + (!type already_seen 1972) + (!srcfileloc "cp/pt.cc" 70) + nil ) + (!pair "last_pending_template" + (!type already_seen 2182) + (!srcfileloc "cp/pt.cc" 63) + nil ) + (!pair "pending_templates" + (!type already_seen 2182) + (!srcfileloc "cp/pt.cc" 62) + nil ) + (!pair "generic_parm_count" + (!type already_seen 2) + (!srcfileloc "cp/parser.cc" 48088) + nil ) + (!pair "the_parser" + (!type already_seen 2016) + (!srcfileloc "cp/parser.cc" 47576) + nil ) + (!pair "cp_parser_decl_specs_attrs" + (!type already_seen 84) + (!srcfileloc "cp/parser.cc" 19249) + nil ) + (!pair "cp_parser_context_free_list" + (!type already_seen 2008) + (!srcfileloc "cp/parser.cc" 2010) + (!options + (!option deletable string "") + ) + ) + (!pair "free_saved_scope" + (!type already_seen 1952) + (!srcfileloc "cp/name-lookup.cc" 8324) + (!options + (!option deletable string "") + ) + ) + (!pair "free_binding_level" + (!type already_seen 82) + (!srcfileloc "cp/name-lookup.cc" 4233) + (!options + (!option deletable string "") + ) + ) + (!pair "extern_c_decls" + (!type already_seen 91) + (!srcfileloc "cp/name-lookup.cc" 2986) + nil ) + (!pair "free_bindings" + (!type already_seen 89) + (!srcfileloc "cp/name-lookup.cc" 2376) + (!options + (!option deletable string "") + ) + ) + (!pair "headers" + (!type already_seen 386) + (!srcfileloc "cp/module.cc" 16718) + nil ) + (!pair "macro_exports" + (!type already_seen 2162) + (!srcfileloc "cp/module.cc" 16714) + nil ) + (!pair "note_defs" + (!type already_seen 2156) + (!srcfileloc "cp/module.cc" 4482) + (!options + (!option cache string "") + ) + ) + (!pair "partial_specializations" + (!type already_seen 84) + (!srcfileloc "cp/module.cc" 3848) + nil ) + (!pair "class_members" + (!type already_seen 84) + (!srcfileloc "cp/module.cc" 3839) + nil ) + (!pair "modules_hash" + (!type already_seen 2148) + (!srcfileloc "cp/module.cc" 3818) + nil ) + 
(!pair "modules" + (!type already_seen 2146) + (!srcfileloc "cp/module.cc" 3815) + nil ) + (!pair "fixed_trees" + (!type already_seen 84) + (!srcfileloc "cp/module.cc" 3796) + nil ) + (!pair "comp_cat_cache" + (!type array 2335 nil gc_used "cc_last" + (!type already_seen 23) + ) + (!srcfileloc "cp/method.cc" 933) + (!options + (!option deletable string "") + ) + ) + (!pair "thunk_labelno" + (!type already_seen 2) + (!srcfileloc "cp/method.cc" 183) + nil ) + (!pair "subst_identifiers" + (!type array 2336 nil gc_used "SUBID_MAX" + (!type already_seen 23) + ) + (!srcfileloc "cp/mangle.cc" 155) + nil ) + (!pair "G" + (!type already_seen 2113) + (!srcfileloc "cp/mangle.cc" 126) + nil ) + (!pair "subsumption_cache" + (!type already_seen 2112) + (!srcfileloc "cp/logic.cc" 751) + (!options + (!option deletable string "") + ) + ) + (!pair "conv_type_names" + (!type already_seen 2106) + (!srcfileloc "cp/lex.cc" 813) + nil ) + (!pair "lambda_scope_stack" + (!type already_seen 2101) + (!srcfileloc "cp/lambda.cc" 1404) + nil ) + (!pair "lambda_count" + (!type already_seen 2) + (!srcfileloc "cp/lambda.cc" 1398) + nil ) + (!pair "lambda_scope" + (!type already_seen 23) + (!srcfileloc "cp/lambda.cc" 1397) + nil ) + (!pair "max_id" + (!type already_seen 23) + (!srcfileloc "cp/lambda.cc" 488) + nil ) + (!pair "ptr_id" + (!type already_seen 23) + (!srcfileloc "cp/lambda.cc" 487) + nil ) + (!pair "nsdmi_inst" + (!type already_seen 938) + (!srcfileloc "cp/init.cc" 566) + (!options + (!option cache string "") + ) + ) + (!pair "fn" + (!type already_seen 23) + (!srcfileloc "cp/init.cc" 53) + nil ) + (!pair "global_friend" + (!type already_seen 23) + (!srcfileloc "cp/friend.cc" 35) + nil ) + (!pair "pending_noexcept_checks" + (!type already_seen 2097) + (!srcfileloc "cp/except.cc" 1111) + nil ) + (!pair "ssdf_decls" + (!type already_seen 84) + (!srcfileloc "cp/decl2.cc" 3958) + nil ) + (!pair "ssdf_decl" + (!type already_seen 23) + (!srcfileloc "cp/decl2.cc" 3954) + nil ) + (!pair 
"priority_decl" + (!type already_seen 23) + (!srcfileloc "cp/decl2.cc" 3951) + nil ) + (!pair "initialize_p_decl" + (!type already_seen 23) + (!srcfileloc "cp/decl2.cc" 3948) + nil ) + (!pair "mangled_decls" + (!type already_seen 2094) + (!srcfileloc "cp/decl2.cc" 137) + nil ) + (!pair "mangling_aliases" + (!type already_seen 84) + (!srcfileloc "cp/decl2.cc" 101) + nil ) + (!pair "no_linkage_decls" + (!type already_seen 84) + (!srcfileloc "cp/decl2.cc" 97) + nil ) + (!pair "deferred_fns" + (!type already_seen 84) + (!srcfileloc "cp/decl2.cc" 93) + nil ) + (!pair "pending_statics" + (!type already_seen 84) + (!srcfileloc "cp/decl2.cc" 89) + nil ) + (!pair "start_cleanup_cnt" + (!type already_seen 2) + (!srcfileloc "cp/decl.cc" 9370) + nil ) + (!pair "decomp_type_table" + (!type already_seen 938) + (!srcfileloc "cp/decl.cc" 8823) + (!options + (!option cache string "") + ) + ) + (!pair "typename_htab" + (!type already_seen 2087) + (!srcfileloc "cp/decl.cc" 4043) + nil ) + (!pair "local_entities" + (!type already_seen 84) + (!srcfileloc "cp/decl.cc" 903) + (!options + (!option deletable string "") + ) + ) + (!pair "incomplete_vars" + (!type already_seen 2082) + (!srcfileloc "cp/decl.cc" 256) + nil ) + (!pair "debug_type_map" + (!type already_seen 941) + (!srcfileloc "cp/cp-objcp-common.cc" 129) + (!options + (!option cache string "") + ) + ) + (!pair "cp_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "cp/cp-lang.cc" 146) + nil ) + (!pair "source_location_id" + (!type already_seen 2) + (!srcfileloc "cp/cp-gimplify.cc" 3235) + nil ) + (!pair "source_location_table" + (!type already_seen 2079) + (!srcfileloc "cp/cp-gimplify.cc" 3234) + nil ) + (!pair "fold_cache" + (!type already_seen 393) + (!srcfileloc "cp/cp-gimplify.cc" 2391) + (!options + (!option deletable string "") + ) + ) + (!pair "to_ramp" + (!type already_seen 393) + (!srcfileloc "cp/coroutines.cc" 560) + nil ) + (!pair "void_coro_handle_type" + (!type already_seen 23) + (!srcfileloc 
"cp/coroutines.cc" 272) + nil ) + (!pair "coro_handle_templ" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 271) + nil ) + (!pair "coro_traits_templ" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 270) + nil ) + (!pair "coro_frame_i_a_r_c_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 227) + nil ) + (!pair "coro_actor_continue_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 226) + nil ) + (!pair "coro_self_handle_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 225) + nil ) + (!pair "coro_resume_index_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 224) + nil ) + (!pair "coro_frame_needs_free_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 223) + nil ) + (!pair "coro_promise_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 222) + nil ) + (!pair "coro_destroy_fn_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 221) + nil ) + (!pair "coro_resume_fn_id" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 220) + nil ) + (!pair "coro_await_resume_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 216) + nil ) + (!pair "coro_await_suspend_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 215) + nil ) + (!pair "coro_await_ready_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 214) + nil ) + (!pair "coro_unhandled_exception_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 210) + nil ) + (!pair "coro_gro_on_allocation_fail_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 209) + nil ) + (!pair "coro_get_return_object_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 208) + nil ) + (!pair "coro_from_address_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 207) + nil ) + (!pair "coro_address_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 206) + nil ) + 
(!pair "coro_resume_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 205) + nil ) + (!pair "coro_yield_value_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 204) + nil ) + (!pair "coro_return_value_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 203) + nil ) + (!pair "coro_return_void_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 202) + nil ) + (!pair "coro_final_suspend_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 201) + nil ) + (!pair "coro_initial_suspend_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 200) + nil ) + (!pair "coro_await_transform_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 199) + nil ) + (!pair "coro_promise_type_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 195) + nil ) + (!pair "coro_handle_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 194) + nil ) + (!pair "coro_traits_identifier" + (!type already_seen 23) + (!srcfileloc "cp/coroutines.cc" 193) + nil ) + (!pair "coroutine_info_table" + (!type already_seen 2059) + (!srcfileloc "cp/coroutines.cc" 114) + nil ) + (!pair "decl_satisfied_cache" + (!type already_seen 393) + (!srcfileloc "cp/constraint.cc" 2533) + (!options + (!option deletable string "") + ) + ) + (!pair "sat_cache" + (!type already_seen 2054) + (!srcfileloc "cp/constraint.cc" 2530) + (!options + (!option deletable string "") + ) + ) + (!pair "failed_type_completions" + (!type already_seen 84) + (!srcfileloc "cp/constraint.cc" 2388) + (!options + (!option deletable string "") + ) + ) + (!pair "decl_constraints" + (!type already_seen 938) + (!srcfileloc "cp/constraint.cc" 1214) + (!options + (!option cache string "") + ) + ) + (!pair "normalized_map" + (!type already_seen 393) + (!srcfileloc "cp/constraint.cc" 834) + (!options + (!option deletable string "") + ) + ) + (!pair "atom_cache" + (!type already_seen 
2050) + (!srcfileloc "cp/constraint.cc" 745) + (!options + (!option deletable string "") + ) + ) + (!pair "cv_cache" + (!type already_seen 393) + (!srcfileloc "cp/constexpr.cc" 8077) + (!options + (!option deletable string "") + ) + ) + (!pair "fundef_copies_table" + (!type already_seen 944) + (!srcfileloc "cp/constexpr.cc" 1264) + nil ) + (!pair "constexpr_call_table" + (!type already_seen 2042) + (!srcfileloc "cp/constexpr.cc" 1212) + nil ) + (!pair "constexpr_fundef_table" + (!type already_seen 2036) + (!srcfileloc "cp/constexpr.cc" 151) + nil ) + (!pair "dvirt_fn" + (!type already_seen 23) + (!srcfileloc "cp/class.cc" 9834) + nil ) + (!pair "abort_fndecl_addr" + (!type already_seen 23) + (!srcfileloc "cp/class.cc" 9833) + nil ) + (!pair "enum_to_min_precision" + (!type already_seen 2032) + (!srcfileloc "cp/class.cc" 3392) + (!options + (!option deletable string "") + ) + ) + (!pair "sizeof_biggest_empty_class" + (!type already_seen 23) + (!srcfileloc "cp/class.cc" 112) + nil ) + (!pair "default_arg_context" + (!type already_seen 84) + (!srcfileloc "cp/call.cc" 8669) + nil ) + (!pair "unemitted_tinfo_decls" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 7464) + nil ) + (!pair "ovl_op_alternate" + (!type array 2337 nil gc_used "OVL_OP_MAX" + (!type already_seen 8) + ) + (!srcfileloc "cp/cp-tree.h" 6125) + nil ) + (!pair "ovl_op_mapping" + (!type array 2338 nil gc_used "MAX_TREE_CODES" + (!type already_seen 8) + ) + (!srcfileloc "cp/cp-tree.h" 6123) + nil ) + (!pair "ovl_op_info" + (!type array 2339 nil gc_used "2" + (!type array 2340 nil gc_used "OVL_OP_MAX" + (!type already_seen 1967) + ) + ) + (!srcfileloc "cp/cp-tree.h" 6121) + nil ) + (!pair "dynamic_initializers" + (!type already_seen 944) + (!srcfileloc "cp/cp-tree.h" 5814) + nil ) + (!pair "tls_aggregates" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 5810) + nil ) + (!pair "static_aggregates" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 5808) + nil ) + (!pair "keyed_classes" 
+ (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 5713) + nil ) + (!pair "static_decls" + (!type already_seen 84) + (!srcfileloc "cp/cp-tree.h" 5709) + nil ) + (!pair "integer_two_node" + (!type already_seen 23) + (!srcfileloc "cp/cp-tree.h" 5619) + nil ) + (!pair "scope_chain" + (!type already_seen 1952) + (!srcfileloc "cp/cp-tree.h" 1870) + nil ) + (!pair "cp_global_trees" + (!type array 2341 nil gc_used "CPTI_MAX" + (!type already_seen 23) + ) + (!srcfileloc "cp/cp-tree.h" 233) + nil ) + (!pair "current_omp_declare_target_attribute" + (!type already_seen 2) + (!srcfileloc "c/c-lang.h" 65) + nil ) + (!pair "the_parser" + (!type already_seen 1926) + (!srcfileloc "c/c-parser.cc" 267) + nil ) + (!pair "locus" + (!type already_seen 23) + (!srcfileloc "c-family/c-format.cc" 71) + nil ) + (!pair "local_cgraph_node_ptr_node" + (!type already_seen 23) + (!srcfileloc "c-family/c-format.cc" 70) + nil ) + (!pair "local_gimple_ptr_node" + (!type already_seen 23) + (!srcfileloc "c-family/c-format.cc" 69) + nil ) + (!pair "local_event_ptr_node" + (!type already_seen 23) + (!srcfileloc "c-family/c-format.cc" 68) + nil ) + (!pair "local_tree_type_node" + (!type already_seen 23) + (!srcfileloc "c-family/c-format.cc" 67) + nil ) + (!pair "options_stack" + (!type already_seen 1909) + (!srcfileloc "c-family/c-pragma.cc" 1054) + nil ) + (!pair "pending_redefine_extname" + (!type already_seen 1907) + (!srcfileloc "c-family/c-pragma.cc" 496) + nil ) + (!pair "pending_weaks" + (!type already_seen 1904) + (!srcfileloc "c-family/c-pragma.cc" 248) + nil ) + (!pair "alignment_stack" + (!type already_seen 1901) + (!srcfileloc "c-family/c-pragma.cc" 53) + nil ) + (!pair "pragma_extern_prefix" + (!type already_seen 23) + (!srcfileloc "c-family/c-pragma.h" 268) + nil ) + (!pair "lazy_hex_fp_value_count" + (!type already_seen 2) + (!srcfileloc "c-family/c-cppbuiltin.cc" 1754) + nil ) + (!pair "lazy_hex_fp_values" + (!type array 2342 nil gc_used "LAZY_HEX_FP_VALUES_CNT" + (!type 
already_seen 1897) + ) + (!srcfileloc "c-family/c-cppbuiltin.cc" 1753) + nil ) + (!pair "g_string_concat_db" + (!type already_seen 729) + (!srcfileloc "c-family/c-common.h" 1139) + nil ) + (!pair "registered_builtin_types" + (!type already_seen 23) + (!srcfileloc "c-family/c-common.h" 1122) + nil ) + (!pair "pending_lang_change" + (!type already_seen 2) + (!srcfileloc "c-family/c-common.h" 1075) + nil ) + (!pair "c_global_trees" + (!type array 2343 nil gc_used "CTI_MAX" + (!type already_seen 23) + ) + (!srcfileloc "c-family/c-common.h" 533) + nil ) + (!pair "ridpointers" + (!type already_seen 24) + (!srcfileloc "c-family/c-common.h" 308) + (!options + (!option length string "(int) RID_MAX") + ) + ) + (!pair "tree_vector_cache" + (!type already_seen 1884) + (!srcfileloc "c-family/c-common.cc" 8250) + (!options + (!option deletable string "") + ) + ) + (!pair "optimize_args" + (!type already_seen 1882) + (!srcfileloc "c-family/c-common.cc" 5851) + nil ) + (!pair "compound_literal_number" + (!type already_seen 2) + (!srcfileloc "c-family/c-common.cc" 4728) + nil ) + (!pair "built_in_attributes" + (!type array 2344 nil gc_used "(int) ATTR_LAST" + (!type already_seen 23) + ) + (!srcfileloc "c-family/c-common.cc" 4015) + nil ) + (!pair "ext_block" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 12223) + nil ) + (!pair "last_structptr_types" + (!type array 2345 nil gc_used "builtin_structptr_type_count" + (!type already_seen 23) + ) + (!srcfileloc "c/c-decl.cc" 1663) + nil ) + (!pair "c_inline_statics" + (!type already_seen 1874) + (!srcfileloc "c/c-decl.cc" 567) + nil ) + (!pair "binding_freelist" + (!type already_seen 639) + (!srcfileloc "c/c-decl.cc" 519) + (!options + (!option deletable string "") + ) + ) + (!pair "scope_freelist" + (!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 515) + (!options + (!option deletable string "") + ) + ) + (!pair "external_scope" + (!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 511) + nil ) + (!pair "file_scope" + 
(!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 506) + nil ) + (!pair "current_function_scope" + (!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 502) + nil ) + (!pair "current_scope" + (!type already_seen 645) + (!srcfileloc "c/c-decl.cc" 496) + nil ) + (!pair "visible_builtins" + (!type already_seen 23) + (!srcfileloc "c/c-decl.cc" 129) + nil ) + (!pair "c_stmt_tree" + (!type already_seen 453) + (!srcfileloc "c/c-decl.cc" 122) + nil ) + (!pair "gnat_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/misc.cc" 1284) + nil ) + (!pair "built_in_attributes" + (!type array 2346 nil gc_used "(int) ATTR_LAST" + (!type already_seen 23) + ) + (!srcfileloc "ada/gcc-interface/utils.cc" 6316) + nil ) + (!pair "builtin_types" + (!type array 2347 nil gc_used "(int) BT_LAST + 1" + (!type already_seen 23) + ) + (!srcfileloc "ada/gcc-interface/utils.cc" 6149) + nil ) + (!pair "dummy_global" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 5902) + nil ) + (!pair "pad_type_hash_table" + (!type already_seen 1861) + (!srcfileloc "ada/gcc-interface/utils.cc" 327) + (!options + (!option cache string "") + ) + ) + (!pair "packable_type_hash_table" + (!type already_seen 1857) + (!srcfileloc "ada/gcc-interface/utils.cc" 304) + (!options + (!option cache string "") + ) + ) + (!pair "free_block_chain" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 281) + (!options + (!option deletable string "") + ) + ) + (!pair "builtin_decls" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/utils.cc" 278) + nil ) + (!pair "global_decls" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/utils.cc" 275) + nil ) + (!pair "global_context" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/utils.cc" 272) + nil ) + (!pair "free_binding_level" + (!type already_seen 1853) + (!srcfileloc "ada/gcc-interface/utils.cc" 269) + (!options + (!option deletable string "") + ) + ) + (!pair 
"current_binding_level" + (!type already_seen 1853) + (!srcfileloc "ada/gcc-interface/utils.cc" 266) + nil ) + (!pair "float_types" + (!type array 2348 nil gc_used "NUM_MACHINE_MODES" + (!type already_seen 23) + ) + (!srcfileloc "ada/gcc-interface/utils.cc" 250) + nil ) + (!pair "signed_and_unsigned_types" + (!type array 2349 nil gc_used "2 * MAX_BITS_PER_WORD + 1" + (!type array 2350 nil gc_used "2" + (!type already_seen 23) + ) + ) + (!srcfileloc "ada/gcc-interface/utils.cc" 247) + nil ) + (!pair "dummy_node_table" + (!type already_seen 24) + (!srcfileloc "ada/gcc-interface/utils.cc" 230) + (!options + (!option length string "max_gnat_nodes") + ) + ) + (!pair "associate_gnat_to_gnu" + (!type already_seen 24) + (!srcfileloc "ada/gcc-interface/utils.cc" 218) + (!options + (!option length string "max_gnat_nodes") + ) + ) + (!pair "thunk_labelno" + (!type already_seen 2) + (!srcfileloc "ada/gcc-interface/trans.cc" 10766) + nil ) + (!pair "gnu_loop_stack" + (!type already_seen 1850) + (!srcfileloc "ada/gcc-interface/trans.cc" 214) + nil ) + (!pair "gnu_return_var_stack" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/trans.cc" 182) + nil ) + (!pair "gnu_return_label_stack" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/trans.cc" 178) + nil ) + (!pair "gnu_elab_proc_stack" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/trans.cc" 174) + nil ) + (!pair "gnu_incoming_exc_ptr" + (!type already_seen 23) + (!srcfileloc "ada/gcc-interface/trans.cc" 171) + nil ) + (!pair "gnu_except_ptr_stack" + (!type already_seen 84) + (!srcfileloc "ada/gcc-interface/trans.cc" 168) + nil ) + (!pair "elab_info_list" + (!type already_seen 1842) + (!srcfileloc "ada/gcc-interface/trans.cc" 163) + nil ) + (!pair "stmt_group_free_list" + (!type already_seen 1840) + (!srcfileloc "ada/gcc-interface/trans.cc" 150) + (!options + (!option deletable string "") + ) + ) + (!pair "current_stmt_group" + (!type already_seen 1840) + (!srcfileloc 
"ada/gcc-interface/trans.cc" 147) + nil ) + (!pair "dummy_to_subprog_map" + (!type already_seen 1835) + (!srcfileloc "ada/gcc-interface/decl.cc" 198) + (!options + (!option cache string "") + ) + ) + (!pair "annotate_value_cache" + (!type already_seen 1827) + (!srcfileloc "ada/gcc-interface/decl.cc" 157) + (!options + (!option cache string "") + ) + ) + (!pair "gnat_raise_decls_ext" + (!type array 2351 nil gc_used "(int) LAST_REASON_CODE + 1" + (!type already_seen 23) + ) + (!srcfileloc "ada/gcc-interface/gigi.h" 454) + nil ) + (!pair "gnat_raise_decls" + (!type array 2352 nil gc_used "(int) LAST_REASON_CODE + 1" + (!type already_seen 23) + ) + (!srcfileloc "ada/gcc-interface/gigi.h" 453) + nil ) + (!pair "gnat_std_decls" + (!type array 2353 nil gc_used "(int) ADT_LAST" + (!type already_seen 23) + ) + (!srcfileloc "ada/gcc-interface/gigi.h" 452) + nil ) + (!pair "ix86_previous_fndecl" + (!type already_seen 23) + (!srcfileloc "config/i386/i386-options.cc" 2965) + nil ) + (!pair "target_attribute_cache" + (!type array 2354 nil gc_used "3" + (!type already_seen 23) + ) + (!srcfileloc "config/i386/i386-options.cc" 1429) + nil ) + (!pair "vselect_insn" + (!type already_seen 296) + (!srcfileloc "config/i386/i386-expand.cc" 18304) + nil ) + (!pair "ix86_cpu_features2_var" + (!type already_seen 23) + (!srcfileloc "config/i386/i386-builtins.cc" 2195) + nil ) + (!pair "ix86_cpu_features2_type_node" + (!type already_seen 23) + (!srcfileloc "config/i386/i386-builtins.cc" 2194) + nil ) + (!pair "ix86_cpu_model_var" + (!type already_seen 23) + (!srcfileloc "config/i386/i386-builtins.cc" 2193) + nil ) + (!pair "ix86_cpu_model_type_node" + (!type already_seen 23) + (!srcfileloc "config/i386/i386-builtins.cc" 2192) + nil ) + (!pair "ix86_builtins" + (!type array 2355 nil gc_used "(int) IX86_BUILTIN_MAX" + (!type already_seen 23) + ) + (!srcfileloc "config/i386/i386-builtins.cc" 223) + nil ) + (!pair "ix86_builtin_func_type_tab" + (!type array 2356 nil gc_used "(int) 
IX86_BT_LAST_ALIAS + 1" + (!type already_seen 23) + ) + (!srcfileloc "config/i386/i386-builtins.cc" 177) + nil ) + (!pair "ix86_builtin_type_tab" + (!type array 2357 nil gc_used "(int) IX86_BT_LAST_CPTR + 1" + (!type already_seen 23) + ) + (!srcfileloc "config/i386/i386-builtins.cc" 126) + nil ) + (!pair "omp_requires_mask" + (!type already_seen 2) + (!srcfileloc "omp-general.h" 137) + nil ) + (!pair "internal_fn_fnspec_array" + (!type array 2358 nil gc_used "IFN_LAST + 1" + (!type already_seen 23) + ) + (!srcfileloc "internal-fn.h" 128) + nil ) + (!pair "odr_enums" + (!type already_seen 84) + (!srcfileloc "ipa-devirt.cc" 502) + nil ) + (!pair "odr_types_ptr" + (!type already_seen 1798) + (!srcfileloc "ipa-devirt.cc" 498) + nil ) + (!pair "ubsan_vptr_type_cache_decl" + (!type already_seen 23) + (!srcfileloc "ubsan.cc" 1203) + nil ) + (!pair "ubsan_ids" + (!type array 2359 nil gc_used "2" + (!type already_seen 2) + ) + (!srcfileloc "ubsan.cc" 344) + nil ) + (!pair "ubsan_source_location_type" + (!type already_seen 23) + (!srcfileloc "ubsan.cc" 239) + nil ) + (!pair "ubsan_type_descriptor_type" + (!type already_seen 23) + (!srcfileloc "ubsan.cc" 189) + nil ) + (!pair "decl_tree_for_type" + (!type already_seen 1782) + (!srcfileloc "ubsan.cc" 82) + (!options + (!option cache string "") + ) + ) + (!pair "hwasan_ctor_statements" + (!type already_seen 23) + (!srcfileloc "asan.cc" 4574) + nil ) + (!pair "asan_ctor_statements" + (!type already_seen 23) + (!srcfileloc "asan.cc" 3531) + nil ) + (!pair "asan_detect_stack_use_after_return" + (!type already_seen 23) + (!srcfileloc "asan.cc" 433) + nil ) + (!pair "shadow_ptr_types" + (!type array 2360 nil gc_used "3" + (!type already_seen 23) + ) + (!srcfileloc "asan.cc" 430) + nil ) + (!pair "hwasan_frame_base_init_seq" + (!type already_seen 296) + (!srcfileloc "asan.cc" 283) + nil ) + (!pair "hwasan_frame_base_ptr" + (!type already_seen 99) + (!srcfileloc "asan.cc" 272) + nil ) + (!pair "vtbl_mangled_name_ids" + (!type 
already_seen 84) + (!srcfileloc "vtable-verify.cc" 309) + nil ) + (!pair "vtbl_mangled_name_types" + (!type already_seen 84) + (!srcfileloc "vtable-verify.cc" 308) + nil ) + (!pair "verify_vtbl_ptr_fndecl" + (!type already_seen 23) + (!srcfileloc "vtable-verify.cc" 151) + nil ) + (!pair "ipa_fn_summaries" + (!type already_seen 1760) + (!srcfileloc "ipa-fnsummary.h" 250) + nil ) + (!pair "tm_wrap_map" + (!type already_seen 1717) + (!srcfileloc "trans-mem.cc" 468) + (!options + (!option cache string "") + ) + ) + (!pair "ipcp_transformation_sum" + (!type already_seen 1714) + (!srcfileloc "ipa-prop.h" 1065) + nil ) + (!pair "ipa_edge_args_sum" + (!type already_seen 1711) + (!srcfileloc "ipa-prop.h" 1038) + nil ) + (!pair "ipa_node_params_sum" + (!type already_seen 1709) + (!srcfileloc "ipa-prop.h" 1036) + nil ) + (!pair "ipa_escaped_pt" + (!type already_seen 385) + (!srcfileloc "tree-ssa-alias.h" 183) + nil ) + (!pair "free_phinodes" + (!type array 2361 nil gc_used "NUM_BUCKETS - 2" + (!type already_seen 1690) + ) + (!srcfileloc "tree-phinodes.cc" 70) + (!options + (!option deletable string "") + ) + ) + (!pair "clone_fn_ids" + (!type already_seen 1688) + (!srcfileloc "cgraphclones.cc" 466) + nil ) + (!pair "ix86_tls_stack_chk_guard_decl" + (!type already_seen 23) + (!srcfileloc "config/i386/i386.cc" 22464) + nil ) + (!pair "dllimport_map" + (!type already_seen 1667) + (!srcfileloc "config/i386/i386.cc" 11835) + (!options + (!option cache string "") + ) + ) + (!pair "ix86_tls_module_base_symbol" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.cc" 11438) + nil ) + (!pair "ix86_tls_symbol" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.cc" 11411) + nil ) + (!pair "split_stack_fn_large" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.cc" 9868) + nil ) + (!pair "split_stack_fn" + (!type already_seen 99) + (!srcfileloc "config/i386/i386.cc" 9863) + nil ) + (!pair "queued_cfa_restores" + (!type already_seen 99) + (!srcfileloc 
"config/i386/i386.cc" 7218) + nil ) + (!pair "stack_chk_fail_decl" + (!type already_seen 23) + (!srcfileloc "targhooks.cc" 910) + nil ) + (!pair "stack_chk_guard_decl" + (!type already_seen 23) + (!srcfileloc "targhooks.cc" 877) + nil ) + (!pair "critical_name_mutexes" + (!type already_seen 393) + (!srcfileloc "omp-low.cc" 10338) + nil ) + (!pair "omp_declare_variant_alt" + (!type already_seen 1654) + (!srcfileloc "omp-general.cc" 2124) + nil ) + (!pair "omp_declare_variants" + (!type already_seen 1651) + (!srcfileloc "omp-general.cc" 2101) + nil ) + (!pair "offload_vars" + (!type already_seen 84) + (!srcfileloc "omp-offload.h" 30) + nil ) + (!pair "offload_funcs" + (!type already_seen 84) + (!srcfileloc "omp-offload.h" 29) + nil ) + (!pair "descriptor_type" + (!type already_seen 23) + (!srcfileloc "tree-nested.cc" 643) + nil ) + (!pair "trampoline_type" + (!type already_seen 23) + (!srcfileloc "tree-nested.cc" 603) + nil ) + (!pair "ic_tuple_callee_field" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 74) + nil ) + (!pair "ic_tuple_counters_field" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 73) + nil ) + (!pair "ic_tuple_var" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 72) + nil ) + (!pair "tree_time_profiler_counter" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 69) + nil ) + (!pair "tree_ior_profiler_fn" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 68) + nil ) + (!pair "tree_average_profiler_fn" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 67) + nil ) + (!pair "tree_indirect_call_profiler_fn" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 66) + nil ) + (!pair "tree_topn_values_profiler_fn" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 65) + nil ) + (!pair "tree_pow2_profiler_fn" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 64) + nil ) + (!pair "tree_interval_profiler_fn" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 63) + nil ) + 
(!pair "gcov_type_node" + (!type already_seen 23) + (!srcfileloc "tree-profile.cc" 62) + nil ) + (!pair "scalar_evolution_info" + (!type already_seen 1636) + (!srcfileloc "tree-scalar-evolution.cc" 312) + nil ) + (!pair "tmp_var_id_num" + (!type already_seen 2) + (!srcfileloc "gimple-expr.cc" 415) + nil ) + (!pair "stmt_list_cache" + (!type already_seen 84) + (!srcfileloc "tree-iterator.cc" 31) + (!options + (!option deletable string "") + ) + ) + (!pair "addr_list" + (!type already_seen 220) + (!srcfileloc "tree-ssa-loop-ivopts.cc" 2593) + nil ) + (!pair "mem_addr_template_list" + (!type already_seen 1596) + (!srcfileloc "tree-ssa-address.cc" 95) + nil ) + (!pair "elf_fini_array_section" + (!type already_seen 213) + (!srcfileloc "varasm.cc" 8386) + nil ) + (!pair "elf_init_array_section" + (!type already_seen 213) + (!srcfileloc "varasm.cc" 8385) + nil ) + (!pair "tm_clone_hash" + (!type already_seen 1574) + (!srcfileloc "varasm.cc" 6374) + (!options + (!option cache string "") + ) + ) + (!pair "weakref_targets" + (!type already_seen 23) + (!srcfileloc "varasm.cc" 6029) + nil ) + (!pair "const_desc_htab" + (!type already_seen 1567) + (!srcfileloc "varasm.cc" 3093) + nil ) + (!pair "initial_trampoline" + (!type already_seen 99) + (!srcfileloc "varasm.cc" 2757) + nil ) + (!pair "weak_decls" + (!type already_seen 23) + (!srcfileloc "varasm.cc" 2552) + nil ) + (!pair "pending_assemble_externals" + (!type already_seen 23) + (!srcfileloc "varasm.cc" 2483) + nil ) + (!pair "shared_constant_pool" + (!type already_seen 1004) + (!srcfileloc "varasm.cc" 210) + nil ) + (!pair "anchor_labelno" + (!type already_seen 2) + (!srcfileloc "varasm.cc" 207) + nil ) + (!pair "object_block_htab" + (!type already_seen 1564) + (!srcfileloc "varasm.cc" 204) + nil ) + (!pair "section_htab" + (!type already_seen 1561) + (!srcfileloc "varasm.cc" 193) + nil ) + (!pair "unnamed_sections" + (!type already_seen 213) + (!srcfileloc "varasm.cc" 178) + nil ) + (!pair "const_labelno" + (!type 
already_seen 2) + (!srcfileloc "varasm.cc" 85) + nil ) + (!pair "weak_global_object_name" + (!type already_seen 11) + (!srcfileloc "varasm.cc" 71) + nil ) + (!pair "first_global_object_name" + (!type already_seen 11) + (!srcfileloc "varasm.cc" 70) + nil ) + (!pair "gcc_eh_personality_decl" + (!type already_seen 23) + (!srcfileloc "tree.cc" 11957) + nil ) + (!pair "anon_cnt" + (!type already_seen 2) + (!srcfileloc "tree.cc" 8675) + nil ) + (!pair "nonstandard_boolean_type_cache" + (!type array 2362 nil gc_used "MAX_BOOL_CACHED_PREC + 1" + (!type already_seen 23) + ) + (!srcfileloc "tree.cc" 7056) + nil ) + (!pair "nonstandard_integer_type_cache" + (!type array 2363 nil gc_used "2 * MAX_INT_CACHED_PREC + 2" + (!type already_seen 23) + ) + (!srcfileloc "tree.cc" 7007) + nil ) + (!pair "debug_args_for_decl" + (!type already_seen 1557) + (!srcfileloc "tree.cc" 248) + (!options + (!option cache string "") + ) + ) + (!pair "value_expr_for_decl" + (!type already_seen 1555) + (!srcfileloc "tree.cc" 245) + (!options + (!option cache string "") + ) + ) + (!pair "debug_expr_for_decl" + (!type already_seen 1555) + (!srcfileloc "tree.cc" 242) + (!options + (!option cache string "") + ) + ) + (!pair "cl_option_hash_table" + (!type already_seen 1553) + (!srcfileloc "tree.cc" 236) + (!options + (!option cache string "") + ) + ) + (!pair "cl_target_option_node" + (!type already_seen 23) + (!srcfileloc "tree.cc" 228) + nil ) + (!pair "cl_optimization_node" + (!type already_seen 23) + (!srcfileloc "tree.cc" 227) + nil ) + (!pair "poly_int_cst_hash_table" + (!type already_seen 1550) + (!srcfileloc "tree.cc" 220) + (!options + (!option cache string "") + ) + ) + (!pair "int_cst_hash_table" + (!type already_seen 1547) + (!srcfileloc "tree.cc" 209) + (!options + (!option cache string "") + ) + ) + (!pair "int_cst_node" + (!type already_seen 23) + (!srcfileloc "tree.cc" 201) + nil ) + (!pair "type_hash_table" + (!type already_seen 1544) + (!srcfileloc "tree.cc" 198) + (!options + (!option 
cache string "") + ) + ) + (!pair "next_debug_decl_uid" + (!type already_seen 2) + (!srcfileloc "tree.cc" 166) + nil ) + (!pair "next_type_uid" + (!type already_seen 2) + (!srcfileloc "tree.cc" 163) + nil ) + (!pair "next_decl_uid" + (!type already_seen 2) + (!srcfileloc "tree.cc" 161) + nil ) + (!pair "spd" + (!type already_seen 1540) + (!srcfileloc "stringpool.cc" 246) + nil ) + (!pair "size_functions" + (!type already_seen 84) + (!srcfileloc "stor-layout.cc" 88) + nil ) + (!pair "cfg_layout_function_header" + (!type already_seen 296) + (!srcfileloc "cfgrtl.cc" 78) + nil ) + (!pair "cfg_layout_function_footer" + (!type already_seen 296) + (!srcfileloc "cfgrtl.cc" 77) + nil ) + (!pair "libfunc_decls" + (!type already_seen 1515) + (!srcfileloc "optabs-libfuncs.cc" 720) + nil ) + (!pair "unused_expr_list" + (!type already_seen 99) + (!srcfileloc "lists.cc" 34) + (!options + (!option deletable string "") + ) + ) + (!pair "unused_insn_list" + (!type already_seen 99) + (!srcfileloc "lists.cc" 31) + (!options + (!option deletable string "") + ) + ) + (!pair "queue" + (!type already_seen 84) + (!srcfileloc "godump.cc" 57) + nil ) + (!pair "test_insn" + (!type already_seen 296) + (!srcfileloc "gcse.cc" 823) + nil ) + (!pair "dummy_unittesting_tree" + (!type already_seen 23) + (!srcfileloc "ggc-tests.cc" 443) + nil ) + (!pair "root_user_struct_ptr" + (!type already_seen 1493) + (!srcfileloc "ggc-tests.cc" 390) + nil ) + (!pair "root_test_node" + (!type already_seen 1491) + (!srcfileloc "ggc-tests.cc" 328) + nil ) + (!pair "test_some_other_subclass_as_base_ptr" + (!type already_seen 1485) + (!srcfileloc "ggc-tests.cc" 275) + nil ) + (!pair "test_some_subclass_as_base_ptr" + (!type already_seen 1485) + (!srcfileloc "ggc-tests.cc" 274) + nil ) + (!pair "test_some_other_subclass" + (!type already_seen 1489) + (!srcfileloc "ggc-tests.cc" 273) + nil ) + (!pair "test_some_subclass" + (!type already_seen 1487) + (!srcfileloc "ggc-tests.cc" 272) + nil ) + (!pair "test_example_base" 
+ (!type already_seen 1485) + (!srcfileloc "ggc-tests.cc" 271) + nil ) + (!pair "test_of_deletable" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 205) + (!options + (!option deletable string "") + ) + ) + (!pair "root_test_of_union_2" + (!type already_seen 1482) + (!srcfileloc "ggc-tests.cc" 135) + nil ) + (!pair "root_test_of_union_1" + (!type already_seen 1482) + (!srcfileloc "ggc-tests.cc" 134) + nil ) + (!pair "root_test_of_length" + (!type already_seen 1479) + (!srcfileloc "ggc-tests.cc" 68) + nil ) + (!pair "root_test_struct" + (!type already_seen 911) + (!srcfileloc "ggc-tests.cc" 42) + nil ) + (!pair "sjlj_fc_type_node" + (!type already_seen 23) + (!srcfileloc "except.cc" 156) + nil ) + (!pair "setjmp_fn" + (!type already_seen 23) + (!srcfileloc "except.cc" 153) + nil ) + (!pair "type_to_runtime_map" + (!type already_seen 1466) + (!srcfileloc "except.cc" 151) + nil ) + (!pair "call_site_base" + (!type already_seen 2) + (!srcfileloc "except.cc" 149) + nil ) + (!pair "next_block_index" + (!type already_seen 2) + (!srcfileloc "function.cc" 4620) + nil ) + (!pair "temp_slot_address_table" + (!type already_seen 1457) + (!srcfileloc "function.cc" 608) + nil ) + (!pair "epilogue_insn_hash" + (!type already_seen 1453) + (!srcfileloc "function.cc" 133) + (!options + (!option cache string "") + ) + ) + (!pair "prologue_insn_hash" + (!type already_seen 1453) + (!srcfileloc "function.cc" 131) + (!options + (!option cache string "") + ) + ) + (!pair "funcdef_no" + (!type already_seen 2) + (!srcfileloc "function.cc" 113) + nil ) + (!pair "stack_check_libfunc" + (!type already_seen 99) + (!srcfileloc "explow.cc" 1638) + nil ) + (!pair "hard_reg_clobbers" + (!type array 2364 nil gc_used "NUM_MACHINE_MODES" + (!type array 2365 nil gc_used "FIRST_PSEUDO_REGISTER" + (!type already_seen 99) + ) + ) + (!srcfileloc "emit-rtl.cc" 6472) + (!options + (!option deletable string "") + ) + ) + (!pair "free_sequence_stack" + (!type already_seen 992) + (!srcfileloc 
"emit-rtl.cc" 5462) + (!options + (!option deletable string "") + ) + ) + (!pair "spill_slot_decl" + (!type already_seen 23) + (!srcfileloc "emit-rtl.cc" 2648) + nil ) + (!pair "const_fixed_htab" + (!type already_seen 1441) + (!srcfileloc "emit-rtl.cc" 188) + (!options + (!option cache string "") + ) + ) + (!pair "const_double_htab" + (!type already_seen 1438) + (!srcfileloc "emit-rtl.cc" 179) + (!options + (!option cache string "") + ) + ) + (!pair "reg_attrs_htab" + (!type already_seen 1435) + (!srcfileloc "emit-rtl.cc" 170) + (!options + (!option cache string "") + ) + ) + (!pair "const_poly_int_htab" + (!type already_seen 1432) + (!srcfileloc "emit-rtl.cc" 161) + (!options + (!option cache string "") + ) + ) + (!pair "const_wide_int_htab" + (!type already_seen 1429) + (!srcfileloc "emit-rtl.cc" 151) + (!options + (!option cache string "") + ) + ) + (!pair "const_int_htab" + (!type already_seen 1426) + (!srcfileloc "emit-rtl.cc" 143) + (!options + (!option cache string "") + ) + ) + (!pair "label_num" + (!type already_seen 2) + (!srcfileloc "emit-rtl.cc" 94) + nil ) + (!pair "x_rtl" + (!type already_seen 1423) + (!srcfileloc "emit-rtl.h" 338) + nil ) + (!pair "shift_test" + (!type already_seen 99) + (!srcfileloc "dojump.cc" 127) + nil ) + (!pair "and_test" + (!type already_seen 99) + (!srcfileloc "dojump.cc" 126) + nil ) + (!pair "and_reg" + (!type already_seen 99) + (!srcfileloc "dojump.cc" 125) + nil ) + (!pair "vector_last_nunits" + (!type already_seen 2) + (!srcfileloc "tree-vect-generic.cc" 146) + nil ) + (!pair "vector_last_type" + (!type already_seen 23) + (!srcfileloc "tree-vect-generic.cc" 145) + nil ) + (!pair "vector_inner_type" + (!type already_seen 23) + (!srcfileloc "tree-vect-generic.cc" 144) + nil ) + (!pair "funcs" + (!type already_seen 1407) + (!srcfileloc "btfout.cc" 105) + nil ) + (!pair "btf_var_ids" + (!type already_seen 1403) + (!srcfileloc "btfout.cc" 73) + nil ) + (!pair "btf_info_section" + (!type already_seen 213) + (!srcfileloc 
"btfout.cc" 42) + nil ) + (!pair "ctf_info_section" + (!type already_seen 213) + (!srcfileloc "ctfout.cc" 36) + nil ) + (!pair "tu_ctfc" + (!type already_seen 1399) + (!srcfileloc "ctfc.h" 341) + nil ) + (!pair "inline_entry_data_table" + (!type already_seen 1364) + (!srcfileloc "dwarf2out.cc" 24293) + nil ) + (!pair "external_die_map" + (!type already_seen 1349) + (!srcfileloc "dwarf2out.cc" 5938) + nil ) + (!pair "addr_index_table" + (!type already_seen 1346) + (!srcfileloc "dwarf2out.cc" 5072) + nil ) + (!pair "generic_type_instances" + (!type already_seen 84) + (!srcfileloc "dwarf2out.cc" 3713) + nil ) + (!pair "tmpl_value_parm_die_table" + (!type already_seen 1339) + (!srcfileloc "dwarf2out.cc" 3707) + nil ) + (!pair "label_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3705) + nil ) + (!pair "last_emitted_file" + (!type already_seen 504) + (!srcfileloc "dwarf2out.cc" 3702) + nil ) + (!pair "poc_label_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3699) + nil ) + (!pair "loclabel_num" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3696) + nil ) + (!pair "have_location_lists" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 3693) + nil ) + (!pair "ranges_by_label" + (!type already_seen 1337) + (!srcfileloc "dwarf2out.cc" 3690) + nil ) + (!pair "ranges_table" + (!type already_seen 1335) + (!srcfileloc "dwarf2out.cc" 3687) + nil ) + (!pair "macinfo_table" + (!type already_seen 1333) + (!srcfileloc "dwarf2out.cc" 3677) + nil ) + (!pair "pubtype_table" + (!type already_seen 1331) + (!srcfileloc "dwarf2out.cc" 3673) + nil ) + (!pair "pubname_table" + (!type already_seen 1331) + (!srcfileloc "dwarf2out.cc" 3669) + nil ) + (!pair "separate_line_info" + (!type already_seen 1329) + (!srcfileloc "dwarf2out.cc" 3661) + nil ) + (!pair "cold_text_section_line_info" + (!type already_seen 1291) + (!srcfileloc "dwarf2out.cc" 3658) + nil ) + (!pair "text_section_line_info" + (!type already_seen 1291) + (!srcfileloc "dwarf2out.cc" 3657) + 
nil ) + (!pair "cur_line_info_table" + (!type already_seen 1291) + (!srcfileloc "dwarf2out.cc" 3654) + nil ) + (!pair "abbrev_die_table" + (!type already_seen 1307) + (!srcfileloc "dwarf2out.cc" 3640) + nil ) + (!pair "cached_dw_loc_list_table" + (!type already_seen 1327) + (!srcfileloc "dwarf2out.cc" 3636) + nil ) + (!pair "call_arg_locations" + (!type already_seen 1320) + (!srcfileloc "dwarf2out.cc" 3608) + nil ) + (!pair "decl_loc_table" + (!type already_seen 1323) + (!srcfileloc "dwarf2out.cc" 3605) + nil ) + (!pair "common_block_die_table" + (!type already_seen 1314) + (!srcfileloc "dwarf2out.cc" 3539) + nil ) + (!pair "variable_value_hash" + (!type already_seen 1311) + (!srcfileloc "dwarf2out.cc" 3529) + nil ) + (!pair "decl_die_table" + (!type already_seen 1305) + (!srcfileloc "dwarf2out.cc" 3512) + nil ) + (!pair "file_table" + (!type already_seen 1302) + (!srcfileloc "dwarf2out.cc" 3501) + nil ) + (!pair "deferred_asm_name" + (!type already_seen 1298) + (!srcfileloc "dwarf2out.cc" 3490) + nil ) + (!pair "limbo_die_list" + (!type already_seen 1298) + (!srcfileloc "dwarf2out.cc" 3486) + nil ) + (!pair "cu_die_list" + (!type already_seen 1298) + (!srcfileloc "dwarf2out.cc" 3483) + nil ) + (!pair "comdat_type_list" + (!type already_seen 490) + (!srcfileloc "dwarf2out.cc" 3480) + nil ) + (!pair "single_comp_unit_die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 3477) + nil ) + (!pair "zero_view_p" + (!type already_seen 386) + (!srcfileloc "dwarf2out.cc" 3400) + nil ) + (!pair "do_eh_frame" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 293) + nil ) + (!pair "current_unit_personality" + (!type already_seen 99) + (!srcfileloc "dwarf2out.cc" 290) + nil ) + (!pair "decltype_auto_die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 282) + nil ) + (!pair "auto_die" + (!type already_seen 487) + (!srcfileloc "dwarf2out.cc" 279) + nil ) + (!pair "switch_cold_ranges" + (!type already_seen 1286) + (!srcfileloc "dwarf2out.cc" 276) + nil ) + 
(!pair "switch_text_ranges" + (!type already_seen 1286) + (!srcfileloc "dwarf2out.cc" 275) + nil ) + (!pair "last_cold_label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 271) + nil ) + (!pair "last_text_label" + (!type already_seen 11) + (!srcfileloc "dwarf2out.cc" 270) + nil ) + (!pair "in_text_section_p" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 267) + nil ) + (!pair "cold_text_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 264) + nil ) + (!pair "have_multiple_function_sections" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 261) + nil ) + (!pair "dw2_string_counter" + (!type already_seen 2) + (!srcfileloc "dwarf2out.cc" 258) + nil ) + (!pair "skeleton_debug_str_hash" + (!type already_seen 1284) + (!srcfileloc "dwarf2out.cc" 256) + nil ) + (!pair "debug_line_str_hash" + (!type already_seen 1284) + (!srcfileloc "dwarf2out.cc" 235) + nil ) + (!pair "debug_str_hash" + (!type already_seen 1284) + (!srcfileloc "dwarf2out.cc" 233) + nil ) + (!pair "fde_vec" + (!type already_seen 1281) + (!srcfileloc "dwarf2out.cc" 215) + nil ) + (!pair "debug_frame_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 176) + nil ) + (!pair "debug_ranges_dwo_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 175) + nil ) + (!pair "debug_ranges_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 174) + nil ) + (!pair "debug_str_offsets_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 173) + nil ) + (!pair "debug_str_dwo_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 172) + nil ) + (!pair "debug_line_str_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 171) + nil ) + (!pair "debug_str_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 170) + nil ) + (!pair "debug_pubtypes_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 169) + nil ) + (!pair "debug_pubnames_section" + (!type already_seen 213) + (!srcfileloc 
"dwarf2out.cc" 168) + nil ) + (!pair "debug_loc_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 167) + nil ) + (!pair "debug_skeleton_line_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 166) + nil ) + (!pair "debug_line_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 165) + nil ) + (!pair "debug_macinfo_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 162) + nil ) + (!pair "debug_addr_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 161) + nil ) + (!pair "debug_aranges_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 160) + nil ) + (!pair "debug_skeleton_abbrev_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 159) + nil ) + (!pair "debug_abbrev_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 158) + nil ) + (!pair "debug_skeleton_info_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 157) + nil ) + (!pair "debug_info_section" + (!type already_seen 213) + (!srcfileloc "dwarf2out.cc" 156) + nil ) + (!pair "incomplete_types" + (!type already_seen 84) + (!srcfileloc "dwarf2out.cc" 153) + nil ) + (!pair "used_rtx_array" + (!type already_seen 220) + (!srcfileloc "dwarf2out.cc" 147) + nil ) + (!pair "ctf_unknown_die" + (!type already_seen 487) + (!srcfileloc "dwarf2ctf.cc" 56) + nil ) + (!pair "ctf_array_index_die" + (!type already_seen 487) + (!srcfileloc "dwarf2ctf.cc" 55) + nil ) + (!pair "ctf_void_die" + (!type already_seen 487) + (!srcfileloc "dwarf2ctf.cc" 54) + nil ) + (!pair "saved_do_cfi_asm" + (!type already_seen 2) + (!srcfileloc "dwarf2cfi.cc" 3694) + nil ) + (!pair "dwarf2out_cfi_label_num" + (!type already_seen 2) + (!srcfileloc "dwarf2cfi.cc" 197) + nil ) + (!pair "cie_return_save" + (!type already_seen 1274) + (!srcfileloc "dwarf2cfi.cc" 195) + nil ) + (!pair "cie_cfi_row" + (!type already_seen 1272) + (!srcfileloc "dwarf2cfi.cc" 193) + nil ) + (!pair "dw2_const_labelno" + (!type already_seen 2) + 
(!srcfileloc "dwarf2asm.cc" 913) + nil ) + (!pair "indirect_pool" + (!type already_seen 1270) + (!srcfileloc "dwarf2asm.cc" 911) + nil ) + (!pair "cie_cfi_vec" + (!type already_seen 472) + (!srcfileloc "dwarf2out.h" 339) + nil ) + (!pair "nowarn_map" + (!type already_seen 1266) + (!srcfileloc "diagnostic-spec.h" 138) + nil ) + (!pair "summaries_lto" + (!type already_seen 1249) + (!srcfileloc "ipa-modref.cc" 273) + nil ) + (!pair "optimization_summaries" + (!type already_seen 1247) + (!srcfileloc "ipa-modref.cc" 267) + nil ) + (!pair "summaries" + (!type already_seen 1247) + (!srcfileloc "ipa-modref.cc" 261) + nil ) + (!pair "lastfile_is_base" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 255) + nil ) + (!pair "lastlineno" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 251) + nil ) + (!pair "lastfile" + (!type already_seen 11) + (!srcfileloc "dbxout.cc" 247) + nil ) + (!pair "source_label_number" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 243) + nil ) + (!pair "dbxout_source_line_counter" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 238) + nil ) + (!pair "scope_labelno" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 234) + nil ) + (!pair "next_file_number" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 230) + nil ) + (!pair "preinit_symbols" + (!type already_seen 23) + (!srcfileloc "dbxout.cc" 197) + nil ) + (!pair "next_type_number" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 191) + nil ) + (!pair "typevec_len" + (!type already_seen 2) + (!srcfileloc "dbxout.cc" 185) + nil ) + (!pair "typevec" + (!type already_seen 1226) + (!srcfileloc "dbxout.cc" 181) + (!options + (!option length string "typevec_len") + ) + ) + (!pair "func_sums" + (!type already_seen 1220) + (!srcfileloc "ipa-sra.cc" 435) + nil ) + (!pair "ipa_vr_hash_table" + (!type already_seen 1121) + (!srcfileloc "ipa-prop.cc" 156) + (!options + (!option cache string "") + ) + ) + (!pair "ipa_bits_hash_table" + (!type already_seen 1118) + (!srcfileloc 
"ipa-prop.cc" 109) + (!options + (!option cache string "") + ) + ) + (!pair "version_info_node" + (!type already_seen 1041) + (!srcfileloc "cgraph.cc" 142) + nil ) + (!pair "cgraph_fnver_htab" + (!type already_seen 1114) + (!srcfileloc "cgraph.cc" 122) + nil ) + (!pair "callmem" + (!type already_seen 99) + (!srcfileloc "cselib.cc" 237) + nil ) + (!pair "bitmap_ggc_free" + (!type already_seen 388) + (!srcfileloc "bitmap.cc" 70) + (!options + (!option deletable string "") + ) + ) + (!pair "frame_set" + (!type already_seen 2) + (!srcfileloc "alias.cc" 1359) + nil ) + (!pair "varargs_set" + (!type already_seen 2) + (!srcfileloc "alias.cc" 1337) + nil ) + (!pair "alias_sets" + (!type already_seen 1107) + (!srcfileloc "alias.cc" 280) + nil ) + (!pair "reg_known_value" + (!type already_seen 220) + (!srcfileloc "alias.cc" 258) + nil ) + (!pair "old_reg_base_value" + (!type already_seen 220) + (!srcfileloc "alias.cc" 239) + (!options + (!option deletable string "") + ) + ) + (!pair "arg_base_value" + (!type already_seen 99) + (!srcfileloc "alias.cc" 231) + nil ) + (!pair "reg_base_value" + (!type already_seen 220) + (!srcfileloc "alias.cc" 226) + nil ) + (!pair "restinsn" + (!type already_seen 296) + (!srcfileloc "caller-save.cc" 107) + nil ) + (!pair "saveinsn" + (!type already_seen 296) + (!srcfileloc "caller-save.cc" 106) + nil ) + (!pair "test_mem" + (!type already_seen 99) + (!srcfileloc "caller-save.cc" 105) + nil ) + (!pair "test_reg" + (!type already_seen 99) + (!srcfileloc "caller-save.cc" 104) + nil ) + (!pair "restpat" + (!type already_seen 99) + (!srcfileloc "caller-save.cc" 103) + nil ) + (!pair "savepat" + (!type already_seen 99) + (!srcfileloc "caller-save.cc" 102) + nil ) + (!pair "thunks" + (!type already_seen 1095) + (!srcfileloc "symtab-thunks.cc" 62) + nil ) + (!pair "vtable_entry_type" + (!type already_seen 23) + (!srcfileloc "symtab-thunks.cc" 54) + nil ) + (!pair "saved_symtab" + (!type already_seen 1079) + (!srcfileloc "cgraph.h" 3552) + nil ) + 
(!pair "symtab" + (!type already_seen 1079) + (!srcfileloc "cgraph.h" 2553) + nil ) + (!pair "in_cold_section_p" + (!type already_seen 2) + (!srcfileloc "output.h" 539) + nil ) + (!pair "in_section" + (!type already_seen 213) + (!srcfileloc "output.h" 538) + nil ) + (!pair "bss_noswitch_section" + (!type already_seen 213) + (!srcfileloc "output.h" 536) + nil ) + (!pair "lcomm_section" + (!type already_seen 213) + (!srcfileloc "output.h" 535) + nil ) + (!pair "comm_section" + (!type already_seen 213) + (!srcfileloc "output.h" 534) + nil ) + (!pair "tls_comm_section" + (!type already_seen 213) + (!srcfileloc "output.h" 533) + nil ) + (!pair "eh_frame_section" + (!type already_seen 213) + (!srcfileloc "output.h" 532) + nil ) + (!pair "exception_section" + (!type already_seen 213) + (!srcfileloc "output.h" 531) + nil ) + (!pair "sbss_section" + (!type already_seen 213) + (!srcfileloc "output.h" 530) + nil ) + (!pair "bss_section" + (!type already_seen 213) + (!srcfileloc "output.h" 529) + nil ) + (!pair "dtors_section" + (!type already_seen 213) + (!srcfileloc "output.h" 528) + nil ) + (!pair "ctors_section" + (!type already_seen 213) + (!srcfileloc "output.h" 527) + nil ) + (!pair "sdata_section" + (!type already_seen 213) + (!srcfileloc "output.h" 526) + nil ) + (!pair "readonly_data_section" + (!type already_seen 213) + (!srcfileloc "output.h" 525) + nil ) + (!pair "data_section" + (!type already_seen 213) + (!srcfileloc "output.h" 524) + nil ) + (!pair "text_section" + (!type already_seen 213) + (!srcfileloc "output.h" 523) + nil ) + (!pair "types_used_by_cur_var_decl" + (!type already_seen 84) + (!srcfileloc "function.h" 497) + nil ) + (!pair "types_used_by_vars_hash" + (!type already_seen 1015) + (!srcfileloc "function.h" 491) + nil ) + (!pair "cfun" + (!type already_seen 351) + (!srcfileloc "function.h" 462) + nil ) + (!pair "regno_reg_rtx" + (!type already_seen 100) + (!srcfileloc "function.h" 87) + (!options + (!option length string "crtl->emit.x_reg_rtx_no") 
+ ) + ) + (!pair "default_target_libfuncs" + (!type already_seen 603) + (!srcfileloc "libfuncs.h" 64) + nil ) + (!pair "current_function_func_begin_label" + (!type already_seen 11) + (!srcfileloc "tree-core.h" 2320) + nil ) + (!pair "current_function_decl" + (!type already_seen 23) + (!srcfileloc "tree-core.h" 2317) + nil ) + (!pair "builtin_info" + (!type array 2366 nil gc_used "(int)END_BUILTINS" + (!type already_seen 981) + ) + (!srcfileloc "tree-core.h" 2311) + nil ) + (!pair "sizetype_tab" + (!type array 2367 nil gc_used "(int) stk_type_kind_last" + (!type already_seen 23) + ) + (!srcfileloc "tree-core.h" 2299) + nil ) + (!pair "integer_types" + (!type array 2368 nil gc_used "itk_none" + (!type already_seen 23) + ) + (!srcfileloc "tree-core.h" 2296) + nil ) + (!pair "global_trees" + (!type array 2369 nil gc_used "TI_MAX" + (!type already_seen 23) + ) + (!srcfileloc "tree-core.h" 2292) + nil ) + (!pair "all_translation_units" + (!type already_seen 84) + (!srcfileloc "tree-core.h" 2289) + nil ) + (!pair "alias_pairs" + (!type already_seen 984) + (!srcfileloc "tree-core.h" 2275) + nil ) + (!pair "int_n_trees" + (!type array 2370 nil gc_used "NUM_INT_N_ENTS" + (!type already_seen 966) + ) + (!srcfileloc "tree.h" 6382) + nil ) + (!pair "stack_limit_rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 4488) + nil ) + (!pair "default_target_rtl" + (!type already_seen 586) + (!srcfileloc "rtl.h" 3922) + nil ) + (!pair "invalid_insn_rtx" + (!type already_seen 296) + (!srcfileloc "rtl.h" 3820) + nil ) + (!pair "simple_return_rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 3819) + nil ) + (!pair "ret_rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 3818) + nil ) + (!pair "pc_rtx" + (!type already_seen 99) + (!srcfileloc "rtl.h" 3817) + nil ) + (!pair "const_tiny_rtx" + (!type array 2371 nil gc_used "4" + (!type array 2372 nil gc_used "(int) MAX_MACHINE_MODE" + (!type already_seen 99) + ) + ) + (!srcfileloc "rtl.h" 3804) + nil ) + (!pair "const_true_rtx" + 
(!type already_seen 99) + (!srcfileloc "rtl.h" 3802) + nil ) + (!pair "const_int_rtx" + (!type array 2373 nil gc_used "MAX_SAVED_CONST_INT * 2 + 1" + (!type already_seen 99) + ) + (!srcfileloc "rtl.h" 3796) + nil ) + (!pair "gcov_fn_info_ptr_type" + (!type already_seen 23) + (!srcfileloc "coverage.cc" 103) + nil ) + (!pair "gcov_fn_info_type" + (!type already_seen 23) + (!srcfileloc "coverage.cc" 102) + nil ) + (!pair "gcov_info_var" + (!type already_seen 23) + (!srcfileloc "coverage.cc" 101) + nil ) + (!pair "fn_v_ctrs" + (!type array 2374 nil gc_used "GCOV_COUNTERS" + (!type already_seen 23) + ) + (!srcfileloc "coverage.cc" 96) + nil ) + (!pair "functions_head" + (!type already_seen 899) + (!srcfileloc "coverage.cc" 87) + nil ) + (!pair "ms_va_list_type_node" + (!type already_seen 23) + (!srcfileloc "config/i386/i386.h" 2752) + nil ) + (!pair "sysv_va_list_type_node" + (!type already_seen 23) + (!srcfileloc "config/i386/i386.h" 2751) + nil ) + (!pair "x86_mfence" + (!type already_seen 23) + (!srcfileloc "config/i386/i386.h" 495) + nil ) + (!pair "saved_line_table" + (!type already_seen 697) + (!srcfileloc "input.h" 27) + nil ) + (!pair "line_table" + (!type already_seen 697) + (!srcfileloc "input.h" 26) + nil ) +) + +(!endfile) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ada/gcc-interface/ada-tree.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ada/gcc-interface/ada-tree.def new file mode 100644 index 0000000..8eb4688 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ada/gcc-interface/ada-tree.def @@ -0,0 +1,79 @@ +/**************************************************************************** + * * + * GNAT COMPILER COMPONENTS * + * * + * GNAT-SPECIFIC GCC TREE CODES * + * * + * Specification * + * * + * Copyright (C) 1992-2009, Free Software Foundation, Inc. 
* + * * + * GNAT is free software; you can redistribute it and/or modify it under * + * terms of the GNU General Public License as published by the Free Soft- * + * ware Foundation; either version 3, or (at your option) any later ver- * + * sion. GNAT is distributed in the hope that it will be useful, but WITH- * + * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * + * for more details. You should have received a copy of the GNU General * + * Public License along with GCC; see the file COPYING3. If not see * + * . * + * * + * GNAT was originally developed by the GNAT team at New York University. * + * Extensive contributions were provided by Ada Core Technologies Inc. * + * * + ****************************************************************************/ + +/* A type that is an unconstrained array. This node is never passed to GCC. + TREE_TYPE is the type of the fat pointer and TYPE_OBJECT_RECORD_TYPE is + the type of a record containing the template and data. */ +DEFTREECODE (UNCONSTRAINED_ARRAY_TYPE, "unconstrained_array_type", tcc_type, 0) + +/* A reference to an unconstrained array. This node only exists as an + intermediate node during the translation of a GNAT tree to a GCC tree; + it is never passed to GCC. The only field used is operand 0, which + is the fat pointer object. */ +DEFTREECODE (UNCONSTRAINED_ARRAY_REF, "unconstrained_array_ref", + tcc_reference, 1) + +/* An expression that returns an RTL suitable for its type. Operand 0 + is an expression to be evaluated for side effects only. */ +DEFTREECODE (NULL_EXPR, "null_expr", tcc_expression, 1) + +/* Same as PLUS_EXPR, except that no modulo reduction is applied. + This is used for loops and never shows up in the tree. */ +DEFTREECODE (PLUS_NOMOD_EXPR, "plus_nomod_expr", tcc_binary, 2) + +/* Same as MINUS_EXPR, except that no modulo reduction is applied. 
+ This is used for loops and never shows up in the tree. */ +DEFTREECODE (MINUS_NOMOD_EXPR, "minus_nomod_expr", tcc_binary, 2) + +/* An expression that computes an exponentiation. Operand 0 is the base and + Operand 1 is the exponent. This node is never passed to GCC: it is only + used internally to describe fixed point types scale factors. */ +DEFTREECODE (POWER_EXPR, "power_expr", tcc_binary, 2) + +/* Same as ADDR_EXPR, except that if the operand represents a bit field, + return the address of the byte containing the bit. This is used + for the Address attribute and never shows up in the tree. */ +DEFTREECODE (ATTR_ADDR_EXPR, "attr_addr_expr", tcc_reference, 1) + +/* Here are the tree codes for the statement types known to Ada. These + must be at the end of this file to allow IS_ADA_STMT to work. */ + +/* This is how record_code_position and insert_code_for work. The former + makes this tree node, whose operand is a statement. The latter inserts + the actual statements into this node. Gimplification consists of + just returning the inner statement. */ +DEFTREECODE (STMT_STMT, "stmt_stmt", tcc_statement, 1) + +/* A loop. LOOP_STMT_COND is the test to exit the loop. LOOP_STMT_UPDATE + is the statement to update the loop iteration variable at the continue + point. LOOP_STMT_BODY are the statements in the body of the loop. And + LOOP_STMT_LABEL points to the LABEL_DECL of the end label of the loop. */ +DEFTREECODE (LOOP_STMT, "loop_stmt", tcc_statement, 4) + +/* Conditionally exit a loop. EXIT_STMT_COND is the condition, which, if + true, will cause the loop to be exited. If no condition is specified, + the loop is unconditionally exited. EXIT_STMT_LABEL is the end label + corresponding to the loop to exit. 
*/ +DEFTREECODE (EXIT_STMT, "exit_stmt", tcc_statement, 2) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/addresses.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/addresses.h new file mode 100644 index 0000000..925a579 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/addresses.h @@ -0,0 +1,90 @@ +/* Inline functions to test validity of reg classes for addressing modes. + Copyright (C) 2006-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Wrapper function to unify target macros MODE_CODE_BASE_REG_CLASS, + MODE_BASE_REG_REG_CLASS, MODE_BASE_REG_CLASS and BASE_REG_CLASS. + Arguments as for the MODE_CODE_BASE_REG_CLASS macro. 
*/ + +#ifndef GCC_ADDRESSES_H +#define GCC_ADDRESSES_H + +static inline enum reg_class +base_reg_class (machine_mode mode ATTRIBUTE_UNUSED, + addr_space_t as ATTRIBUTE_UNUSED, + enum rtx_code outer_code ATTRIBUTE_UNUSED, + enum rtx_code index_code ATTRIBUTE_UNUSED) +{ +#ifdef MODE_CODE_BASE_REG_CLASS + return MODE_CODE_BASE_REG_CLASS (MACRO_MODE (mode), as, outer_code, + index_code); +#else +#ifdef MODE_BASE_REG_REG_CLASS + if (index_code == REG) + return MODE_BASE_REG_REG_CLASS (MACRO_MODE (mode)); +#endif +#ifdef MODE_BASE_REG_CLASS + return MODE_BASE_REG_CLASS (MACRO_MODE (mode)); +#else + return BASE_REG_CLASS; +#endif +#endif +} + +/* Wrapper function to unify target macros REGNO_MODE_CODE_OK_FOR_BASE_P, + REGNO_MODE_OK_FOR_REG_BASE_P, REGNO_MODE_OK_FOR_BASE_P and + REGNO_OK_FOR_BASE_P. + Arguments as for the REGNO_MODE_CODE_OK_FOR_BASE_P macro. */ + +static inline bool +ok_for_base_p_1 (unsigned regno ATTRIBUTE_UNUSED, + machine_mode mode ATTRIBUTE_UNUSED, + addr_space_t as ATTRIBUTE_UNUSED, + enum rtx_code outer_code ATTRIBUTE_UNUSED, + enum rtx_code index_code ATTRIBUTE_UNUSED) +{ +#ifdef REGNO_MODE_CODE_OK_FOR_BASE_P + return REGNO_MODE_CODE_OK_FOR_BASE_P (regno, MACRO_MODE (mode), as, + outer_code, index_code); +#else +#ifdef REGNO_MODE_OK_FOR_REG_BASE_P + if (index_code == REG) + return REGNO_MODE_OK_FOR_REG_BASE_P (regno, MACRO_MODE (mode)); +#endif +#ifdef REGNO_MODE_OK_FOR_BASE_P + return REGNO_MODE_OK_FOR_BASE_P (regno, MACRO_MODE (mode)); +#else + return REGNO_OK_FOR_BASE_P (regno); +#endif +#endif +} + +/* Wrapper around ok_for_base_p_1, for use after register allocation is + complete. Arguments as for the called function. 
*/ + +static inline bool +regno_ok_for_base_p (unsigned regno, machine_mode mode, addr_space_t as, + enum rtx_code outer_code, enum rtx_code index_code) +{ + if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0) + regno = reg_renumber[regno]; + + return ok_for_base_p_1 (regno, mode, as, outer_code, index_code); +} + +#endif /* GCC_ADDRESSES_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/alias.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/alias.h new file mode 100644 index 0000000..b259651 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/alias.h @@ -0,0 +1,50 @@ +/* Exported functions from alias.cc + Copyright (C) 2004-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_ALIAS_H +#define GCC_ALIAS_H + +extern alias_set_type new_alias_set (void); +extern alias_set_type get_alias_set (tree); +extern alias_set_type get_deref_alias_set (tree); +extern alias_set_type get_varargs_alias_set (void); +extern alias_set_type get_frame_alias_set (void); +extern tree component_uses_parent_alias_set_from (const_tree); +extern bool ends_tbaa_access_path_p (const_tree); +extern bool alias_set_subset_of (alias_set_type, alias_set_type); +extern void record_alias_subset (alias_set_type, alias_set_type); +extern void record_component_aliases (tree); +extern int alias_sets_conflict_p (alias_set_type, alias_set_type); +extern int alias_sets_must_conflict_p (alias_set_type, alias_set_type); +extern int objects_must_conflict_p (tree, tree); +extern int nonoverlapping_memrefs_p (const_rtx, const_rtx, bool); +extern void dump_alias_stats_in_alias_c (FILE *s); +tree reference_alias_ptr_type (tree); +tree reference_alias_ptr_type_1 (tree *); +bool alias_ptr_types_compatible_p (tree, tree); +int compare_base_decls (tree, tree); +bool refs_same_for_tbaa_p (tree, tree); + +/* This alias set can be used to force a memory to conflict with all + other memories, creating a barrier across which no memory reference + can move. Note that there are other legacy ways to create such + memory barriers, including an address of SCRATCH. */ +#define ALIAS_SET_MEMORY_BARRIER ((alias_set_type) -1) + +#endif /* GCC_ALIAS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/align.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/align.h new file mode 100644 index 0000000..a51b5c6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/align.h @@ -0,0 +1,83 @@ +/* Alignment-related classes. + Copyright (C) 2018-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Align flags tuple with alignment in log form and with a maximum skip. */ + +struct align_flags_tuple +{ + /* Values of the -falign-* flags: how much to align labels in code. + log is "align to 2^log" (so 0 means no alignment). + maxskip is the maximum allowed amount of padding to insert. */ + int log; + int maxskip; + + /* Normalize filled values so that maxskip is not bigger than 1 << log. */ + void normalize () + { + int n = (1 << log); + if (maxskip > n) + maxskip = n - 1; + } + + /* Return original value of an alignment flag. */ + int get_value () + { + return maxskip + 1; + } +}; + +/* Alignment flags is structure used as value of -align-* options. + It's used in target-dependant code. */ + +class align_flags +{ +public: + /* Default constructor. */ + align_flags (int log0 = 0, int maxskip0 = 0, int log1 = 0, int maxskip1 = 0) + { + levels[0].log = log0; + levels[0].maxskip = maxskip0; + levels[1].log = log1; + levels[1].maxskip = maxskip1; + normalize (); + } + + /* Normalize both components of align_flags. */ + void normalize () + { + for (unsigned i = 0; i < 2; i++) + levels[i].normalize (); + } + + /* Get alignment that is common bigger alignment of alignments F0 and F1. 
*/ + static align_flags max (const align_flags f0, const align_flags f1) + { + int log0 = MAX (f0.levels[0].log, f1.levels[0].log); + int maxskip0 = MAX (f0.levels[0].maxskip, f1.levels[0].maxskip); + int log1 = MAX (f0.levels[1].log, f1.levels[1].log); + int maxskip1 = MAX (f0.levels[1].maxskip, f1.levels[1].maxskip); + return align_flags (log0, maxskip0, log1, maxskip1); + } + + align_flags_tuple levels[2]; +}; + +/* Define maximum supported code alignment. */ +#define MAX_CODE_ALIGN 16 +#define MAX_CODE_ALIGN_VALUE (1 << MAX_CODE_ALIGN) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/all-tree.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/all-tree.def new file mode 100644 index 0000000..724d185 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/all-tree.def @@ -0,0 +1,7 @@ +#include "tree.def" +END_OF_BASE_TREE_CODES +#include "c-family/c-common.def" +#include "ada/gcc-interface/ada-tree.def" +#include "cp/cp-tree.def" +#include "d/d-tree.def" +#include "objc/objc-tree.def" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/alloc-pool.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/alloc-pool.h new file mode 100644 index 0000000..3963596 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/alloc-pool.h @@ -0,0 +1,576 @@ +/* Functions to support a pool of allocatable objects + Copyright (C) 1997-2022 Free Software Foundation, Inc. + Contributed by Daniel Berlin + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ +#ifndef ALLOC_POOL_H +#define ALLOC_POOL_H + +#include "memory-block.h" +#include "options.h" // for flag_checking + +extern void dump_alloc_pool_statistics (void); + +/* Flag indicates whether memory statistics are gathered any longer. */ +extern bool after_memory_report; + +typedef unsigned long ALLOC_POOL_ID_TYPE; + +/* Last used ID. */ +extern ALLOC_POOL_ID_TYPE last_id; + +/* Pool allocator memory usage. */ +class pool_usage: public mem_usage +{ +public: + /* Default contructor. */ + pool_usage (): m_element_size (0), m_pool_name ("") {} + /* Constructor. */ + pool_usage (size_t allocated, size_t times, size_t peak, + size_t instances, size_t element_size, + const char *pool_name) + : mem_usage (allocated, times, peak, instances), + m_element_size (element_size), + m_pool_name (pool_name) {} + + /* Sum the usage with SECOND usage. */ + pool_usage + operator+ (const pool_usage &second) + { + return pool_usage (m_allocated + second.m_allocated, + m_times + second.m_times, + m_peak + second.m_peak, + m_instances + second.m_instances, + m_element_size, m_pool_name); + } + + /* Dump usage coupled to LOC location, where TOTAL is sum of all rows. */ + inline void + dump (mem_location *loc, const mem_usage &total) const + { + char *location_string = loc->to_string (); + + fprintf (stderr, "%-32s%-48s " PRsa(5) PRsa(9) ":%5.1f%%" + PRsa(9) PRsa(9) ":%5.1f%%%12" PRIu64 "\n", + m_pool_name, location_string, + SIZE_AMOUNT (m_instances), + SIZE_AMOUNT (m_allocated), + get_percent (m_allocated, total.m_allocated), + SIZE_AMOUNT (m_peak), + SIZE_AMOUNT (m_times), + get_percent (m_times, total.m_times), + (uint64_t)m_element_size); + + free (location_string); + } + + /* Dump header with NAME. 
*/ + static inline void + dump_header (const char *name) + { + fprintf (stderr, "%-32s%-48s %6s%11s%16s%17s%12s\n", "Pool name", name, + "Pools", "Leak", "Peak", "Times", "Elt size"); + } + + /* Dump footer. */ + inline void + dump_footer () + { + fprintf (stderr, "%s" PRsa(82) PRsa(10) "\n", "Total", + SIZE_AMOUNT (m_instances), SIZE_AMOUNT (m_allocated)); + } + + /* Element size. */ + size_t m_element_size; + /* Pool name. */ + const char *m_pool_name; +}; + +extern mem_alloc_description pool_allocator_usage; + +#if 0 +/* If a pool with custom block size is needed, one might use the following + template. An instance of this template can be used as a parameter for + instantiating base_pool_allocator template: + + typedef custom_block_allocator <128*1024> huge_block_allocator; + ... + static base_pool_allocator + value_pool ("value", 16384); + + Right now it's not used anywhere in the code, and is given here as an + example). */ + +template +class custom_block_allocator +{ +public: + static const size_t block_size = BlockSize; + + static inline void * + allocate () ATTRIBUTE_MALLOC + { + return XNEWVEC (char, BlockSize); + } + + static inline void + release (void *block) + { + XDELETEVEC (block); + } +}; +#endif + +/* Generic pool allocator. */ + +template +class base_pool_allocator +{ +public: + /* Default constructor for pool allocator called NAME. */ + base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO); + ~base_pool_allocator (); + void release (); + void release_if_empty (); + void *allocate () ATTRIBUTE_MALLOC; + void remove (void *object); + size_t num_elts_current (); + +private: + struct allocation_pool_list + { + allocation_pool_list *next; + }; + + /* Initialize a pool allocator. */ + void initialize (); + + struct allocation_object + { +#if CHECKING_P + /* The ID of alloc pool which the object was allocated from. */ + ALLOC_POOL_ID_TYPE id; +#endif + + union + { + /* The data of the object. 
*/ + char data[1]; + + /* Because we want any type of data to be well aligned after the ID, + the following elements are here. They are never accessed so + the allocated object may be even smaller than this structure. + We do not care about alignment for floating-point types. */ + char *align_p; + int64_t align_i; + } u; + +#if CHECKING_P + static inline allocation_object* + get_instance (void *data_ptr) + { + return (allocation_object *)(((char *)(data_ptr)) + - offsetof (allocation_object, + u.data)); + } +#endif + + static inline void* + get_data (void *instance_ptr) + { + return (void*)(((allocation_object *) instance_ptr)->u.data); + } + }; + + /* Align X to 8. */ + static inline size_t + align_eight (size_t x) + { + return (((x+7) >> 3) << 3); + } + + const char *m_name; + ALLOC_POOL_ID_TYPE m_id; + size_t m_elts_per_block; + + /* These are the elements that have been allocated at least once + and freed. */ + allocation_pool_list *m_returned_free_list; + + /* These are the elements that have not yet been allocated out of + the last block obtained from XNEWVEC. */ + char* m_virgin_free_list; + + /* The number of elements in the virgin_free_list that can be + allocated before needing another block. */ + size_t m_virgin_elts_remaining; + /* The number of elements that are allocated. */ + size_t m_elts_allocated; + /* The number of elements that are released. */ + size_t m_elts_free; + /* The number of allocated blocks. */ + size_t m_blocks_allocated; + /* List of blocks that are used to allocate new objects. */ + allocation_pool_list *m_block_list; + /* Size of a pool elements in bytes. */ + size_t m_elt_size; + /* Size in bytes that should be allocated for each element. */ + size_t m_size; + /* Flag if a pool allocator is initialized. */ + bool m_initialized; + /* Memory allocation location. 
*/ + mem_location m_location; +}; + +template +inline +base_pool_allocator ::base_pool_allocator ( + const char *name, size_t size MEM_STAT_DECL): + m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL), + m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0), + m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_elt_size (0), + m_size (size), m_initialized (false), + m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {} + +/* Initialize a pool allocator. */ + +template +inline void +base_pool_allocator ::initialize () +{ + gcc_checking_assert (!m_initialized); + m_initialized = true; + + size_t size = m_size; + + gcc_checking_assert (m_name); + gcc_checking_assert (m_size); + + /* Make size large enough to store the list header. */ + if (size < sizeof (allocation_pool_list*)) + size = sizeof (allocation_pool_list*); + + /* Now align the size to a multiple of 8. */ + size = align_eight (size); + + /* Add the aligned size of ID. */ + size += offsetof (allocation_object, u.data); + + m_elt_size = size; + + if (GATHER_STATISTICS) + { + pool_usage *u = pool_allocator_usage.register_descriptor + (this, new mem_location (m_location)); + + u->m_element_size = m_elt_size; + u->m_pool_name = m_name; + } + + /* List header size should be a multiple of 8. */ + size_t header_size = align_eight (sizeof (allocation_pool_list)); + + m_elts_per_block = (TBlockAllocator::block_size - header_size) / size; + gcc_checking_assert (m_elts_per_block != 0); + + /* Increase the last used ID and use it for this pool. + ID == 0 is used for free elements of pool so skip it. */ + last_id++; + if (last_id == 0) + last_id++; + + m_id = last_id; +} + +/* Free all memory allocated for the given memory pool. */ +template +inline void +base_pool_allocator ::release () +{ + if (!m_initialized) + return; + + allocation_pool_list *block, *next_block; + + /* Free each block allocated to the pool. 
*/ + for (block = m_block_list; block != NULL; block = next_block) + { + next_block = block->next; + TBlockAllocator::release (block); + } + + if (GATHER_STATISTICS && !after_memory_report) + { + pool_allocator_usage.release_instance_overhead + (this, (m_elts_allocated - m_elts_free) * m_elt_size); + } + + m_returned_free_list = NULL; + m_virgin_free_list = NULL; + m_virgin_elts_remaining = 0; + m_elts_allocated = 0; + m_elts_free = 0; + m_blocks_allocated = 0; + m_block_list = NULL; +} + +template +inline void +base_pool_allocator ::release_if_empty () +{ + if (m_elts_free == m_elts_allocated) + release (); +} + +template +inline base_pool_allocator ::~base_pool_allocator () +{ + release (); +} + +/* Allocates one element from the pool specified. */ +template +inline void* +base_pool_allocator ::allocate () +{ + if (!m_initialized) + initialize (); + + allocation_pool_list *header; +#ifdef ENABLE_VALGRIND_ANNOTATIONS + int size; +#endif + + if (GATHER_STATISTICS) + { + pool_allocator_usage.register_instance_overhead (m_elt_size, this); + } + +#ifdef ENABLE_VALGRIND_ANNOTATIONS + size = m_elt_size - offsetof (allocation_object, u.data); +#endif + + /* If there are no more free elements, make some more!. */ + if (!m_returned_free_list) + { + char *block; + if (!m_virgin_elts_remaining) + { + allocation_pool_list *block_header; + + /* Make the block. */ + block = reinterpret_cast (TBlockAllocator::allocate ()); + block_header = new (block) allocation_pool_list; + block += align_eight (sizeof (allocation_pool_list)); + + /* Throw it on the block list. */ + block_header->next = m_block_list; + m_block_list = block_header; + + /* Make the block available for allocation. */ + m_virgin_free_list = block; + m_virgin_elts_remaining = m_elts_per_block; + + /* Also update the number of elements we have free/allocated, and + increment the allocated block count. 
*/ + m_elts_allocated += m_elts_per_block; + m_elts_free += m_elts_per_block; + m_blocks_allocated += 1; + } + + /* We now know that we can take the first elt off the virgin list and + put it on the returned list. */ + block = m_virgin_free_list; + header = (allocation_pool_list*) allocation_object::get_data (block); + header->next = NULL; + + /* Mark the element to be free. */ +#if CHECKING_P + ((allocation_object*) block)->id = 0; +#endif + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size)); + m_returned_free_list = header; + m_virgin_free_list += m_elt_size; + m_virgin_elts_remaining--; + + } + + /* Pull the first free element from the free list, and return it. */ + header = m_returned_free_list; + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header))); + m_returned_free_list = header->next; + m_elts_free--; + + /* Set the ID for element. */ +#if CHECKING_P + allocation_object::get_instance (header)->id = m_id; +#endif + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size)); + + return (void *)(header); +} + +/* Puts PTR back on POOL's free list. */ +template +inline void +base_pool_allocator ::remove (void *object) +{ + int size = m_elt_size - offsetof (allocation_object, u.data); + + if (flag_checking) + { + gcc_assert (m_initialized); + gcc_assert (object + /* Check if we free more than we allocated. */ + && m_elts_free < m_elts_allocated); +#if CHECKING_P + /* Check whether the PTR was allocated from POOL. */ + gcc_assert (m_id == allocation_object::get_instance (object)->id); +#endif + + memset (object, 0xaf, size); + } + +#if CHECKING_P + /* Mark the element to be free. 
*/ + allocation_object::get_instance (object)->id = 0; +#endif + + allocation_pool_list *header = new (object) allocation_pool_list; + header->next = m_returned_free_list; + m_returned_free_list = header; + VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size)); + m_elts_free++; + + if (GATHER_STATISTICS) + { + pool_allocator_usage.release_instance_overhead (this, m_elt_size); + } +} + +/* Number of elements currently active (not returned to pool). Used for cheap + consistency checks. */ +template +inline size_t +base_pool_allocator ::num_elts_current () +{ + return m_elts_allocated - m_elts_free; +} + +/* Specialization of base_pool_allocator which should be used in most cases. + Another specialization may be needed, if object size is greater than + memory_block_pool::block_size (64 KB). */ +typedef base_pool_allocator pool_allocator; + +/* Type based memory pool allocator. */ +template +class object_allocator +{ +public: + /* Default constructor for pool allocator called NAME. */ + object_allocator (const char *name CXX_MEM_STAT_INFO): + m_allocator (name, sizeof (T) PASS_MEM_STAT) {} + + inline void + release () + { + m_allocator.release (); + } + + inline void release_if_empty () + { + m_allocator.release_if_empty (); + } + + + /* Allocate memory for instance of type T and call a default constructor. */ + + inline T * + allocate () ATTRIBUTE_MALLOC + { + return ::new (m_allocator.allocate ()) T; + } + + /* Allocate memory for instance of type T and return void * that + could be used in situations where a default constructor is not provided + by the class T. */ + + inline void * + allocate_raw () ATTRIBUTE_MALLOC + { + return m_allocator.allocate (); + } + + inline void + remove (T *object) + { + /* Call destructor. 
*/ + object->~T (); + + m_allocator.remove (object); + } + + inline void + remove_raw (void *object) + { + m_allocator.remove (object); + } + + inline size_t + num_elts_current () + { + return m_allocator.num_elts_current (); + } + +private: + pool_allocator m_allocator; +}; + +/* Store information about each particular alloc_pool. Note that this + will underestimate the amount the amount of storage used by a small amount: + 1) The overhead in a pool is not accounted for. + 2) The unallocated elements in a block are not accounted for. Note + that this can at worst case be one element smaller that the block + size for that pool. */ +struct alloc_pool_descriptor +{ + /* Number of pools allocated. */ + unsigned long created; + /* Gross allocated storage. */ + unsigned long allocated; + /* Amount of currently active storage. */ + unsigned long current; + /* Peak amount of storage used. */ + unsigned long peak; + /* Size of element in the pool. */ + int elt_size; +}; + +/* Helper for classes that do not provide default ctor. */ + +template +inline void * +operator new (size_t, object_allocator &a) +{ + return a.allocate_raw (); +} + +/* Hashtable mapping alloc_pool names to descriptors. */ +extern hash_map *alloc_pool_hash; + + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ansidecl.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ansidecl.h new file mode 100644 index 0000000..4275c9b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ansidecl.h @@ -0,0 +1,436 @@ +/* ANSI and traditional C compatability macros + Copyright (C) 1991-2022 Free Software Foundation, Inc. + This file is part of the GNU C Library. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* ANSI and traditional C compatibility macros + + ANSI C is assumed if __STDC__ is #defined. + + Macro ANSI C definition Traditional C definition + ----- ---- - ---------- ----------- - ---------- + PTR `void *' `char *' + const not defined `' + volatile not defined `' + signed not defined `' + + For ease of writing code which uses GCC extensions but needs to be + portable to other compilers, we provide the GCC_VERSION macro that + simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various + wrappers around __attribute__. Also, __extension__ will be #defined + to nothing if it doesn't work. See below. */ + +#ifndef _ANSIDECL_H +#define _ANSIDECL_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* Every source file includes this file, + so they will all get the switch for lint. */ +/* LINTLIBRARY */ + +/* Using MACRO(x,y) in cpp #if conditionals does not work with some + older preprocessors. Thus we can't define something like this: + +#define HAVE_GCC_VERSION(MAJOR, MINOR) \ + (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) + +and then test "#if HAVE_GCC_VERSION(2,7)". + +So instead we use the macro below and test it against specific values. */ + +/* This macro simplifies testing whether we are using gcc, and if it + is of a particular minimum version. (Both major & minor numbers are + significant.) This macro will evaluate to 0 if we are not using + gcc at all. 
*/ +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) +#endif /* GCC_VERSION */ + +#if defined (__STDC__) || defined(__cplusplus) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) +/* All known AIX compilers implement these things (but don't always + define __STDC__). The RISC/OS MIPS compiler defines these things + in SVR4 mode, but does not define __STDC__. */ +/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other + C++ compilers, does not define __STDC__, though it acts as if this + was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ + +#define PTR void * + +#undef const +#undef volatile +#undef signed + +/* inline requires special treatment; it's in C99, and GCC >=2.7 supports + it too, but it's not in C89. */ +#undef inline +#if (!defined(__cplusplus) && __STDC_VERSION__ >= 199901L) || defined(__cplusplus) || (defined(__SUNPRO_C) && defined(__C99FEATURES__)) +/* it's a keyword */ +#else +# if GCC_VERSION >= 2007 +# define inline __inline__ /* __inline__ prevents -pedantic warnings */ +# else +# define inline /* nothing */ +# endif +#endif + +#else /* Not ANSI C. */ + +#define PTR char * + +/* some systems define these in header files for non-ansi mode */ +#undef const +#undef volatile +#undef signed +#undef inline +#define const +#define volatile +#define signed +#define inline + +#endif /* ANSI C. */ + +/* Define macros for some gcc attributes. This permits us to use the + macros freely, and know that they will come into play for the + version of gcc in which they are supported. */ + +#if (GCC_VERSION < 2007) +# define __attribute__(x) +#endif + +/* Attribute __malloc__ on functions was valid as of gcc 2.96. */ +#ifndef ATTRIBUTE_MALLOC +# if (GCC_VERSION >= 2096) +# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) +# else +# define ATTRIBUTE_MALLOC +# endif /* GNUC >= 2.96 */ +#endif /* ATTRIBUTE_MALLOC */ + +/* Attributes on labels were valid as of gcc 2.93 and g++ 4.5. 
For + g++ an attribute on a label must be followed by a semicolon. */ +#ifndef ATTRIBUTE_UNUSED_LABEL +# ifndef __cplusplus +# if GCC_VERSION >= 2093 +# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED +# else +# define ATTRIBUTE_UNUSED_LABEL +# endif +# else +# if GCC_VERSION >= 4005 +# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED ; +# else +# define ATTRIBUTE_UNUSED_LABEL +# endif +# endif +#endif + +/* Similarly to ARG_UNUSED below. Prior to GCC 3.4, the C++ frontend + couldn't parse attributes placed after the identifier name, and now + the entire compiler is built with C++. */ +#ifndef ATTRIBUTE_UNUSED +#if GCC_VERSION >= 3004 +# define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) +#else +#define ATTRIBUTE_UNUSED +#endif +#endif /* ATTRIBUTE_UNUSED */ + +/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the + identifier name. */ +#if ! defined(__cplusplus) || (GCC_VERSION >= 3004) +# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED +#else /* !__cplusplus || GNUC >= 3.4 */ +# define ARG_UNUSED(NAME) NAME +#endif /* !__cplusplus || GNUC >= 3.4 */ + +#ifndef ATTRIBUTE_NORETURN +#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) +#endif /* ATTRIBUTE_NORETURN */ + +/* Attribute `nonnull' was valid as of gcc 3.3. */ +#ifndef ATTRIBUTE_NONNULL +# if (GCC_VERSION >= 3003) +# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) +# else +# define ATTRIBUTE_NONNULL(m) +# endif /* GNUC >= 3.3 */ +#endif /* ATTRIBUTE_NONNULL */ + +/* Attribute `returns_nonnull' was valid as of gcc 4.9. */ +#ifndef ATTRIBUTE_RETURNS_NONNULL +# if (GCC_VERSION >= 4009) +# define ATTRIBUTE_RETURNS_NONNULL __attribute__ ((__returns_nonnull__)) +# else +# define ATTRIBUTE_RETURNS_NONNULL +# endif /* GNUC >= 4.9 */ +#endif /* ATTRIBUTE_RETURNS_NONNULL */ + +/* Attribute `pure' was valid as of gcc 3.0. 
*/ +#ifndef ATTRIBUTE_PURE +# if (GCC_VERSION >= 3000) +# define ATTRIBUTE_PURE __attribute__ ((__pure__)) +# else +# define ATTRIBUTE_PURE +# endif /* GNUC >= 3.0 */ +#endif /* ATTRIBUTE_PURE */ + +/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. + This was the case for the `printf' format attribute by itself + before GCC 3.3, but as of 3.3 we need to add the `nonnull' + attribute to retain this behavior. */ +#ifndef ATTRIBUTE_PRINTF +#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) +#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) +#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) +#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) +#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) +#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) +#endif /* ATTRIBUTE_PRINTF */ + +/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on + a function pointer. Format attributes were allowed on function + pointers as of gcc 3.1. */ +#ifndef ATTRIBUTE_FPTR_PRINTF +# if (GCC_VERSION >= 3001) +# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) +# else +# define ATTRIBUTE_FPTR_PRINTF(m, n) +# endif /* GNUC >= 3.1 */ +# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) +# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) +# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) +# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) +# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) +#endif /* ATTRIBUTE_FPTR_PRINTF */ + +/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A + NULL format specifier was allowed as of gcc 3.3. 
*/ +#ifndef ATTRIBUTE_NULL_PRINTF +# if (GCC_VERSION >= 3003) +# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) +# else +# define ATTRIBUTE_NULL_PRINTF(m, n) +# endif /* GNUC >= 3.3 */ +# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) +# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) +# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) +# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) +# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) +#endif /* ATTRIBUTE_NULL_PRINTF */ + +/* Attribute `sentinel' was valid as of gcc 3.5. */ +#ifndef ATTRIBUTE_SENTINEL +# if (GCC_VERSION >= 3005) +# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) +# else +# define ATTRIBUTE_SENTINEL +# endif /* GNUC >= 3.5 */ +#endif /* ATTRIBUTE_SENTINEL */ + + +#ifndef ATTRIBUTE_ALIGNED_ALIGNOF +# if (GCC_VERSION >= 3000) +# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) +# else +# define ATTRIBUTE_ALIGNED_ALIGNOF(m) +# endif /* GNUC >= 3.0 */ +#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ + +/* Useful for structures whose layout must match some binary specification + regardless of the alignment and padding qualities of the compiler. */ +#ifndef ATTRIBUTE_PACKED +# define ATTRIBUTE_PACKED __attribute__ ((packed)) +#endif + +/* Attribute `hot' and `cold' was valid as of gcc 4.3. */ +#ifndef ATTRIBUTE_COLD +# if (GCC_VERSION >= 4003) +# define ATTRIBUTE_COLD __attribute__ ((__cold__)) +# else +# define ATTRIBUTE_COLD +# endif /* GNUC >= 4.3 */ +#endif /* ATTRIBUTE_COLD */ +#ifndef ATTRIBUTE_HOT +# if (GCC_VERSION >= 4003) +# define ATTRIBUTE_HOT __attribute__ ((__hot__)) +# else +# define ATTRIBUTE_HOT +# endif /* GNUC >= 4.3 */ +#endif /* ATTRIBUTE_HOT */ + +/* Attribute 'no_sanitize_undefined' was valid as of gcc 4.9. 
*/ +#ifndef ATTRIBUTE_NO_SANITIZE_UNDEFINED +# if (GCC_VERSION >= 4009) +# define ATTRIBUTE_NO_SANITIZE_UNDEFINED __attribute__ ((no_sanitize_undefined)) +# else +# define ATTRIBUTE_NO_SANITIZE_UNDEFINED +# endif /* GNUC >= 4.9 */ +#endif /* ATTRIBUTE_NO_SANITIZE_UNDEFINED */ + +/* Attribute 'nonstring' was valid as of gcc 8. */ +#ifndef ATTRIBUTE_NONSTRING +# if GCC_VERSION >= 8000 +# define ATTRIBUTE_NONSTRING __attribute__ ((__nonstring__)) +# else +# define ATTRIBUTE_NONSTRING +# endif +#endif + +/* Attribute `alloc_size' was valid as of gcc 4.3. */ +#ifndef ATTRIBUTE_RESULT_SIZE_1 +# if (GCC_VERSION >= 4003) +# define ATTRIBUTE_RESULT_SIZE_1 __attribute__ ((alloc_size (1))) +# else +# define ATTRIBUTE_RESULT_SIZE_1 +#endif +#endif + +#ifndef ATTRIBUTE_RESULT_SIZE_2 +# if (GCC_VERSION >= 4003) +# define ATTRIBUTE_RESULT_SIZE_2 __attribute__ ((alloc_size (2))) +# else +# define ATTRIBUTE_RESULT_SIZE_2 +#endif +#endif + +#ifndef ATTRIBUTE_RESULT_SIZE_1_2 +# if (GCC_VERSION >= 4003) +# define ATTRIBUTE_RESULT_SIZE_1_2 __attribute__ ((alloc_size (1, 2))) +# else +# define ATTRIBUTE_RESULT_SIZE_1_2 +#endif +#endif + +/* Attribute `warn_unused_result' was valid as of gcc 3.3. */ +#ifndef ATTRIBUTE_WARN_UNUSED_RESULT +# if GCC_VERSION >= 3003 +# define ATTRIBUTE_WARN_UNUSED_RESULT __attribute__ ((warn_unused_result)) +# else +# define ATTRIBUTE_WARN_UNUSED_RESULT +# endif +#endif + +/* We use __extension__ in some places to suppress -pedantic warnings + about GCC extensions. This feature didn't work properly before + gcc 2.8. */ +#if GCC_VERSION < 2008 +#define __extension__ +#endif + +/* This is used to declare a const variable which should be visible + outside of the current compilation unit. Use it as + EXPORTED_CONST int i = 1; + This is because the semantics of const are different in C and C++. + "extern const" is permitted in C but it looks strange, and gcc + warns about it when -Wc++-compat is not used. 
*/ +#ifdef __cplusplus +#define EXPORTED_CONST extern const +#else +#define EXPORTED_CONST const +#endif + +/* Be conservative and only use enum bitfields with C++ or GCC. + FIXME: provide a complete autoconf test for buggy enum bitfields. */ + +#ifdef __cplusplus +#define ENUM_BITFIELD(TYPE) enum TYPE +#elif (GCC_VERSION > 2000) +#define ENUM_BITFIELD(TYPE) __extension__ enum TYPE +#else +#define ENUM_BITFIELD(TYPE) unsigned int +#endif + +#if defined(__cplusplus) && __cpp_constexpr >= 200704 +#define CONSTEXPR constexpr +#else +#define CONSTEXPR +#endif + +/* C++11 adds the ability to add "override" after an implementation of a + virtual function in a subclass, to: + (A) document that this is an override of a virtual function + (B) allow the compiler to issue a warning if it isn't (e.g. a mismatch + of the type signature). + + Similarly, it allows us to add a "final" to indicate that no subclass + may subsequently override the vfunc. + + Provide OVERRIDE and FINAL as macros, allowing us to get these benefits + when compiling with C++11 support, but without requiring C++11. + + For gcc, use "-std=c++11" to enable C++11 support; gcc 6 onwards enables + this by default (actually GNU++14). */ + +#if defined __cplusplus +# if __cplusplus >= 201103 + /* C++11 claims to be available: use it. Final/override were only + implemented in 4.7, though. */ +# if GCC_VERSION < 4007 +# define OVERRIDE +# define FINAL +# else +# define OVERRIDE override +# define FINAL final +# endif +# elif GCC_VERSION >= 4007 + /* G++ 4.7 supports __final in C++98. */ +# define OVERRIDE +# define FINAL __final +# else + /* No C++11 support; leave the macros empty. */ +# define OVERRIDE +# define FINAL +# endif +#else + /* No C++11 support; leave the macros empty. */ +# define OVERRIDE +# define FINAL +#endif + +/* A macro to disable the copy constructor and assignment operator. 
+ When building with C++11 and above, the methods are explicitly + deleted, causing a compile-time error if something tries to copy. + For C++03, this just declares the methods, causing a link-time + error if the methods end up called (assuming you don't + define them). For C++03, for best results, place the macro + under the private: access specifier, like this, + + class name_lookup + { + private: + DISABLE_COPY_AND_ASSIGN (name_lookup); + }; + + so that most attempts at copy are caught at compile-time. */ + +#if defined(__cplusplus) && __cplusplus >= 201103 +#define DISABLE_COPY_AND_ASSIGN(TYPE) \ + TYPE (const TYPE&) = delete; \ + void operator= (const TYPE &) = delete + #else +#define DISABLE_COPY_AND_ASSIGN(TYPE) \ + TYPE (const TYPE&); \ + void operator= (const TYPE &) +#endif /* __cplusplus >= 201103 */ + +#ifdef __cplusplus +} +#endif + +#endif /* ansidecl.h */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/array-traits.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/array-traits.h new file mode 100644 index 0000000..3741777 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/array-traits.h @@ -0,0 +1,48 @@ +/* Descriptions of array-like objects. + Copyright (C) 2019-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_ARRAY_TRAITS_H +#define GCC_ARRAY_TRAITS_H + +/* Implementation for single integers (and similar types). 
*/ +template +struct scalar_array_traits +{ + typedef T element_type; + static const bool has_constant_size = true; + static const size_t constant_size = 1; + static const T *base (const T &x) { return &x; } + static size_t size (const T &) { return 1; } +}; + +template +struct array_traits : scalar_array_traits {}; + +/* Implementation for arrays with a static size. */ +template +struct array_traits +{ + typedef T element_type; + static const bool has_constant_size = true; + static const size_t constant_size = N; + static const T *base (const T (&x)[N]) { return x; } + static size_t size (const T (&)[N]) { return N; } +}; + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/asan.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/asan.h new file mode 100644 index 0000000..d4ea49c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/asan.h @@ -0,0 +1,263 @@ +/* AddressSanitizer, a fast memory error detector. + Copyright (C) 2011-2022 Free Software Foundation, Inc. + Contributed by Kostya Serebryany + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef TREE_ASAN +#define TREE_ASAN + +extern void asan_function_start (void); +extern void asan_finish_file (void); +extern rtx_insn *asan_emit_stack_protection (rtx, rtx, unsigned int, + HOST_WIDE_INT *, tree *, int); +extern rtx_insn *asan_emit_allocas_unpoison (rtx, rtx, rtx_insn *); +extern bool asan_protect_global (tree, bool ignore_decl_rtl_set_p = false); +extern void initialize_sanitizer_builtins (void); +extern tree asan_dynamic_init_call (bool); +extern bool asan_expand_check_ifn (gimple_stmt_iterator *, bool); +extern bool asan_expand_mark_ifn (gimple_stmt_iterator *); +extern bool asan_expand_poison_ifn (gimple_stmt_iterator *, bool *, + hash_map &); + +extern void hwasan_record_frame_init (); +extern void hwasan_record_stack_var (rtx, rtx, poly_int64, poly_int64); +extern void hwasan_emit_prologue (); +extern rtx_insn *hwasan_emit_untag_frame (rtx, rtx); +extern rtx hwasan_get_frame_extent (); +extern rtx hwasan_frame_base (); +extern void hwasan_maybe_emit_frame_base_init (void); +extern bool stack_vars_base_reg_p (rtx); +extern uint8_t hwasan_current_frame_tag (); +extern void hwasan_increment_frame_tag (); +extern rtx hwasan_truncate_to_tag_size (rtx, rtx); +extern void hwasan_finish_file (void); +extern bool hwasan_sanitize_p (void); +extern bool hwasan_sanitize_stack_p (void); +extern bool hwasan_sanitize_allocas_p (void); +extern bool hwasan_expand_check_ifn (gimple_stmt_iterator *, bool); +extern bool hwasan_expand_mark_ifn (gimple_stmt_iterator *); +extern bool gate_hwasan (void); + +extern gimple_stmt_iterator create_cond_insert_point + (gimple_stmt_iterator *, bool, bool, bool, basic_block *, basic_block *); + +/* Alias set for accessing the shadow memory. */ +extern alias_set_type asan_shadow_set; + +/* Hash set of labels that are either used in a goto, or their address + has been taken. */ +extern hash_set *asan_used_labels; + +/* Shadow memory is found at + (address >> ASAN_SHADOW_SHIFT) + asan_shadow_offset (). 
*/ +#define ASAN_SHADOW_SHIFT 3 +#define ASAN_SHADOW_GRANULARITY (1UL << ASAN_SHADOW_SHIFT) + +/* Red zone size, stack and global variables are padded by ASAN_RED_ZONE_SIZE + up to 2 * ASAN_RED_ZONE_SIZE - 1 bytes. */ +#define ASAN_RED_ZONE_SIZE 32 + +/* Stack variable use more compact red zones. The size includes also + size of variable itself. */ + +#define ASAN_MIN_RED_ZONE_SIZE 16 + +/* Shadow memory values for stack protection. Left is below protected vars, + the first pointer in stack corresponding to that offset contains + ASAN_STACK_FRAME_MAGIC word, the second pointer to a string describing + the frame. Middle is for padding in between variables, right is + above the last protected variable and partial immediately after variables + up to ASAN_RED_ZONE_SIZE alignment. */ +#define ASAN_STACK_MAGIC_LEFT 0xf1 +#define ASAN_STACK_MAGIC_MIDDLE 0xf2 +#define ASAN_STACK_MAGIC_RIGHT 0xf3 +#define ASAN_STACK_MAGIC_USE_AFTER_RET 0xf5 +#define ASAN_STACK_MAGIC_USE_AFTER_SCOPE 0xf8 + +#define ASAN_STACK_FRAME_MAGIC 0x41b58ab3 +#define ASAN_STACK_RETIRED_MAGIC 0x45e0360e + +#define ASAN_USE_AFTER_SCOPE_ATTRIBUTE "use after scope memory" + +/* NOTE: The values below and the hooks under targetm.memtag define an ABI and + are hard-coded to these values in libhwasan, hence they can't be changed + independently here. */ +/* How many bits are used to store a tag in a pointer. + The default version uses the entire top byte of a pointer (i.e. 8 bits). */ +#define HWASAN_TAG_SIZE targetm.memtag.tag_size () +/* Tag Granule of HWASAN shadow stack. + This is the size in real memory that each byte in the shadow memory refers + to. I.e. if a variable is X bytes long in memory then its tag in shadow + memory will span X / HWASAN_TAG_GRANULE_SIZE bytes. + Most variables will need to be aligned to this amount since two variables + that are neighbors in memory and share a tag granule would need to share the + same tag (the shared tag granule can only store one tag). 
*/ +#define HWASAN_TAG_GRANULE_SIZE targetm.memtag.granule_size () +/* Define the tag for the stack background. + This defines what tag the stack pointer will be and hence what tag all + variables that are not given special tags are (e.g. spilled registers, + and parameters passed on the stack). */ +#define HWASAN_STACK_BACKGROUND gen_int_mode (0, QImode) + +/* Various flags for Asan builtins. */ +enum asan_check_flags +{ + ASAN_CHECK_STORE = 1 << 0, + ASAN_CHECK_SCALAR_ACCESS = 1 << 1, + ASAN_CHECK_NON_ZERO_LEN = 1 << 2, + ASAN_CHECK_LAST = 1 << 3 +}; + +/* Flags for Asan check builtins. */ +#define IFN_ASAN_MARK_FLAGS DEF(POISON), DEF(UNPOISON) + +enum asan_mark_flags +{ +#define DEF(X) ASAN_MARK_##X + IFN_ASAN_MARK_FLAGS +#undef DEF +}; + +/* Return true if STMT is ASAN_MARK with FLAG as first argument. */ +extern bool asan_mark_p (gimple *stmt, enum asan_mark_flags flag); + +/* Return the size of padding needed to insert after a protected + decl of SIZE. */ + +static inline unsigned int +asan_red_zone_size (unsigned int size) +{ + unsigned int c = size & (ASAN_RED_ZONE_SIZE - 1); + return c ? 2 * ASAN_RED_ZONE_SIZE - c : ASAN_RED_ZONE_SIZE; +} + +/* Return how much a stack variable occupis on a stack + including a space for red zone. */ + +static inline unsigned HOST_WIDE_INT +asan_var_and_redzone_size (unsigned HOST_WIDE_INT size) +{ + if (size <= 4) + return 16; + else if (size <= 16) + return 32; + else if (size <= 128) + return size + 32; + else if (size <= 512) + return size + 64; + else if (size <= 4096) + return size + 128; + else + return size + 256; +} + +extern bool set_asan_shadow_offset (const char *); + +extern bool asan_shadow_offset_set_p (); + +extern void set_sanitized_sections (const char *); + +extern bool asan_sanitize_stack_p (void); + +extern bool asan_sanitize_allocas_p (void); + +extern hash_set *asan_handled_variables; + +/* Return TRUE if builtin with given FCODE will be intercepted by + libasan. 
*/ + +static inline bool +asan_intercepted_p (enum built_in_function fcode) +{ + if (hwasan_sanitize_p ()) + return false; + + return fcode == BUILT_IN_INDEX + || fcode == BUILT_IN_MEMCHR + || fcode == BUILT_IN_MEMCMP + || fcode == BUILT_IN_MEMCPY + || fcode == BUILT_IN_MEMMOVE + || fcode == BUILT_IN_MEMSET + || fcode == BUILT_IN_STRCASECMP + || fcode == BUILT_IN_STRCAT + || fcode == BUILT_IN_STRCHR + || fcode == BUILT_IN_STRCMP + || fcode == BUILT_IN_STRCPY + || fcode == BUILT_IN_STRDUP + || fcode == BUILT_IN_STRLEN + || fcode == BUILT_IN_STRNCASECMP + || fcode == BUILT_IN_STRNCAT + || fcode == BUILT_IN_STRNCMP + || fcode == BUILT_IN_STRCSPN + || fcode == BUILT_IN_STRPBRK + || fcode == BUILT_IN_STRSPN + || fcode == BUILT_IN_STRSTR + || fcode == BUILT_IN_STRNCPY; +} + +/* Return TRUE if we should instrument for use-after-scope sanity checking. */ + +static inline bool +asan_sanitize_use_after_scope (void) +{ + return (flag_sanitize_address_use_after_scope + && (asan_sanitize_stack_p () || hwasan_sanitize_stack_p ())); +} + +/* Return true if DECL should be guarded on the stack. */ + +static inline bool +asan_protect_stack_decl (tree decl) +{ + return DECL_P (decl) + && (!DECL_ARTIFICIAL (decl) + || (asan_sanitize_use_after_scope () && TREE_ADDRESSABLE (decl))); +} + +/* Return true when flag_sanitize & FLAG is non-zero. If FN is non-null, + remove all flags mentioned in "no_sanitize" of DECL_ATTRIBUTES. */ + +static inline bool +sanitize_flags_p (unsigned int flag, const_tree fn = current_function_decl) +{ + unsigned int result_flags = flag_sanitize & flag; + if (result_flags == 0) + return false; + + if (fn != NULL_TREE) + { + tree value = lookup_attribute ("no_sanitize", DECL_ATTRIBUTES (fn)); + if (value) + result_flags &= ~tree_to_uhwi (TREE_VALUE (value)); + } + + return result_flags; +} + +/* Return true when coverage sanitization should happend for FN function. 
*/ + +static inline bool +sanitize_coverage_p (const_tree fn = current_function_decl) +{ + return (flag_sanitize_coverage + && (fn == NULL_TREE + || lookup_attribute ("no_sanitize_coverage", + DECL_ATTRIBUTES (fn)) == NULL_TREE)); +} + +#endif /* TREE_ASAN */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/attr-fnspec.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/attr-fnspec.h new file mode 100644 index 0000000..d506c75 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/attr-fnspec.h @@ -0,0 +1,304 @@ +/* Handling of fnspec attribute specifiers + Copyright (C) 2008-2022 Free Software Foundation, Inc. + Contributed by Richard Guenther + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* Parse string of attribute "fn spec". This is an internal attribute + describing side effects of a function as follows: + + character 0 specifies properties of return values as follows: + '1'...'4' specifies number of argument function returns (as in memset) + 'm' specifies that returned value is noalias (as in malloc) + '.' specifies that nothing is known. + character 1 specifies additional function properties + ' ' specifies that nothing is known + 'p' or 'P' specifies that function is pure except for described side + effects. + 'c' or 'C' specifies that function is const except for described side + effects. 
+ The uppercase letter in addition specifies that function clobbers errno. + + character 2+2i specifies properties of argument number i as follows: + 'x' or 'X' specifies that parameter is unused. + 'r' or 'R' specifies that the memory pointed to by the parameter is only + read and does not escape + 'o' or 'O' specifies that the memory pointed to by the parameter is only + written and does not escape + 'w' or 'W' specifies that the memory pointed to by the parameter does not + escape + '1'....'9' specifies that the memory pointed to by the parameter is + copied to memory pointed to by different parameter + (as in memcpy). + '.' specifies that nothing is known. + The uppercase letter in addition specifies that the memory pointed to + by the parameter is not dereferenced. For 'r' only read applies + transitively to pointers read from the pointed-to memory. + + character 3+2i specifies additional properties of argument number i + as follows: + ' ' nothing is known + 't' the size of value written/read corresponds to the size of + of the pointed-to type of the argument type + '1'...'9' specifies the size of value written/read is given by the + specified argument + */ + +#ifndef ATTR_FNSPEC_H +#define ATTR_FNSPEC_H + +class attr_fnspec +{ +private: + /* fn spec attribute string. */ + const char *str; + /* length of the fn spec string. */ + const unsigned len; + /* Number of characters specifying return value. */ + const unsigned int return_desc_size = 2; + /* Number of characters specifying size. */ + const unsigned int arg_desc_size = 2; + + /* Return start of specifier of arg i. 
*/ + unsigned int arg_idx (int i) + { + return return_desc_size + arg_desc_size * i; + } + +public: + attr_fnspec (const char *str, unsigned len) + : str (str), len (len) + { + if (flag_checking) + verify (); + } + attr_fnspec (const char *str) + : str (str), len (strlen (str)) + { + if (flag_checking) + verify (); + } + attr_fnspec (const_tree identifier) + : str (TREE_STRING_POINTER (identifier)), + len (TREE_STRING_LENGTH (identifier)) + { + if (flag_checking) + verify (); + } + attr_fnspec () + : str (NULL), len (0) + { + } + + /* Return true if fn spec is known. */ + bool + known_p () + { + return len; + } + + /* Return true if arg I is specified. */ + bool + arg_specified_p (unsigned int i) + { + return len >= arg_idx (i + 1); + } + + /* True if the argument is not dereferenced recursively, thus only + directly reachable memory is read or written. */ + bool + arg_direct_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx] == 'R' || str[idx] == 'O' + || str[idx] == 'W' || (str[idx] >= '1' && str[idx] <= '9'); + } + + /* True if argument is used. */ + bool + arg_used_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx] != 'x' && str[idx] != 'X'; + } + + /* True if memory reached by the argument is readonly (not clobbered). */ + bool + arg_readonly_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx] == 'r' || str[idx] == 'R' || (str[idx] >= '1' && str[idx] <= '9'); + } + + /* True if memory reached by the argument is read (directly or indirectly) */ + bool + arg_maybe_read_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx] != 'o' && str[idx] != 'O' + && str[idx] != 'x' && str[idx] != 'X'; + } + + /* True if memory reached by the argument is written. 
+ (directly or indirectly) */ + bool + arg_maybe_written_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx] != 'r' && str[idx] != 'R' + && (str[idx] < '1' || str[idx] > '9') + && str[idx] != 'x' && str[idx] != 'X'; + } + + /* Return true if load of memory pointed to by argument I is specified + by another argument. In this case set ARG. */ + bool + arg_max_access_size_given_by_arg_p (unsigned int i, unsigned int *arg) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + if (str[idx + 1] >= '1' && str[idx + 1] <= '9') + { + *arg = str[idx + 1] - '1'; + return true; + } + else + return false; + } + + /* Return true if the pointed-to type of the argument correspond to the + size of the memory acccess. */ + bool + arg_access_size_given_by_type_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx + 1] == 't'; + } + + /* Return true if memory pointer to by argument is copied to a memory + pointed to by a different argument (as in memcpy). + In this case set ARG. */ + bool + arg_copied_to_arg_p (unsigned int i, unsigned int *arg) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + if (str[idx] < '1' || str[idx] > '9') + return false; + *arg = str[idx] - '1'; + return true; + } + + + /* True if the argument does not escape. */ + bool + arg_noescape_p (unsigned int i) + { + unsigned int idx = arg_idx (i); + gcc_checking_assert (arg_specified_p (i)); + return str[idx] == 'w' || str[idx] == 'W' + || str[idx] == 'r' || str[idx] == 'R' + || str[idx] == 'o' || str[idx] == 'O'; + } + + /* Return true if function returns value of its parameter. If ARG_NO is + non-NULL return initialize it to the argument returned. 
*/ + bool + returns_arg (unsigned int *arg_no) + { + if (str[0] >= '1' && str[0] <= '4') + { + if (arg_no) + *arg_no = str[0] - '1'; + return true; + } + return false; + } + + /* Nonzero if the return value does not alias with anything. Functions + with the malloc attribute have this set on their return value. */ + bool + returns_noalias_p () + { + return str[0] == 'm'; + } + + /* Return true if all memory read by the function is specified by fnspec. */ + bool + global_memory_read_p () + { + return str[1] != 'c' && str[1] != 'C'; + } + + /* Return true if all memory written by the function + is specified by fnspec. */ + bool + global_memory_written_p () + { + return str[1] != 'c' && str[1] != 'C' && str[1] != 'p' && str[1] != 'P'; + } + + bool + errno_maybe_written_p () + { + return str[1] == 'C' || str[1] == 'P'; + } + + /* Return EAF flags for arg I. */ + int + arg_eaf_flags (unsigned int i) + { + int flags = 0; + + if (!arg_specified_p (i)) + ; + else if (!arg_used_p (i)) + flags = EAF_UNUSED; + else + { + if (arg_direct_p (i)) + flags |= EAF_NO_INDIRECT_READ | EAF_NO_INDIRECT_ESCAPE + | EAF_NOT_RETURNED_INDIRECTLY | EAF_NO_INDIRECT_CLOBBER; + if (arg_noescape_p (i)) + flags |= EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE; + if (arg_readonly_p (i)) + flags |= EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER; + } + return flags; + } + + /* Check validity of the string. */ + void verify (); + + /* Return the fnspec string. */ + const char * + get_str () + { + return str; + } +}; + +extern attr_fnspec gimple_call_fnspec (const gcall *stmt); +extern attr_fnspec builtin_fnspec (tree); + +#endif /* ATTR_FNSPEC_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/attribs.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/attribs.h new file mode 100644 index 0000000..5b6f63e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/attribs.h @@ -0,0 +1,346 @@ +/* Declarations and definitions dealing with attribute handling. 
+ Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_ATTRIBS_H +#define GCC_ATTRIBS_H + +extern const struct attribute_spec *lookup_attribute_spec (const_tree); +extern void free_attr_data (); +extern void init_attributes (void); + +/* Process the attributes listed in ATTRIBUTES and install them in *NODE, + which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL, + it should be modified in place; if a TYPE, a copy should be created + unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further + information, in the form of a bitwise OR of flags in enum attribute_flags + from tree.h. Depending on these flags, some attributes may be + returned to be applied at a later stage (for example, to apply + a decl attribute to the declaration rather than to its type). 
*/ +extern tree decl_attributes (tree *, tree, int, tree = NULL_TREE); + +extern bool cxx11_attribute_p (const_tree); +extern tree get_attribute_name (const_tree); +extern tree get_attribute_namespace (const_tree); +extern void apply_tm_attr (tree, tree); +extern tree make_attribute (const char *, const char *, tree); +extern bool attribute_ignored_p (tree); +extern bool attribute_ignored_p (const attribute_spec *const); + +extern struct scoped_attributes* register_scoped_attributes (const struct attribute_spec *, + const char *, + bool = false); + +extern char *sorted_attr_string (tree); +extern bool common_function_versions (tree, tree); +extern tree make_dispatcher_decl (const tree); +extern bool is_function_default_version (const tree); +extern void handle_ignored_attributes_option (vec *); + +/* Return a type like TTYPE except that its TYPE_ATTRIBUTES + is ATTRIBUTE. + + Such modified types already made are recorded so that duplicates + are not made. */ + +extern tree build_type_attribute_variant (tree, tree); +extern tree build_decl_attribute_variant (tree, tree); +extern tree build_type_attribute_qual_variant (tree, tree, int); + +extern bool simple_cst_list_equal (const_tree, const_tree); +extern bool attribute_value_equal (const_tree, const_tree); + +/* Return 0 if the attributes for two types are incompatible, 1 if they + are compatible, and 2 if they are nearly compatible (which causes a + warning to be generated). */ +extern int comp_type_attributes (const_tree, const_tree); + +extern tree affects_type_identity_attributes (tree, bool = true); +extern tree restrict_type_identity_attributes_to (tree, tree); + +/* Default versions of target-overridable functions. */ +extern tree merge_decl_attributes (tree, tree); +extern tree merge_type_attributes (tree, tree); + +/* Remove any instances of attribute ATTR_NAME in LIST and return the + modified list. 
*/ + +extern tree remove_attribute (const char *, tree); + +/* Given two attributes lists, return a list of their union. */ + +extern tree merge_attributes (tree, tree); + +/* Duplicate all attributes with name NAME in ATTR list to *ATTRS if + they are missing there. */ + +extern void duplicate_one_attribute (tree *, tree, const char *); + +/* Duplicate all attributes from user DECL to the corresponding + builtin that should be propagated. */ + +extern void copy_attributes_to_builtin (tree); + +/* Given two Windows decl attributes lists, possibly including + dllimport, return a list of their union . */ +extern tree merge_dllimport_decl_attributes (tree, tree); + +/* Handle a "dllimport" or "dllexport" attribute. */ +extern tree handle_dll_attribute (tree *, tree, tree, int, bool *); + +extern int attribute_list_equal (const_tree, const_tree); +extern int attribute_list_contained (const_tree, const_tree); + +/* The backbone of lookup_attribute(). ATTR_LEN is the string length + of ATTR_NAME, and LIST is not NULL_TREE. + + The function is called from lookup_attribute in order to optimize + for size. */ +extern tree private_lookup_attribute (const char *attr_name, size_t attr_len, + tree list); + +extern unsigned decls_mismatched_attributes (tree, tree, tree, + const char* const[], + pretty_printer*); + +extern void maybe_diag_alias_attributes (tree, tree); + +/* For a given string S of length L, strip leading and trailing '_' characters + so that we have a canonical form of attribute names. NB: This function may + change S and L. */ + +template +inline bool +canonicalize_attr_name (const char *&s, T &l) +{ + if (l > 4 && s[0] == '_' && s[1] == '_' && s[l - 1] == '_' && s[l - 2] == '_') + { + s += 2; + l -= 4; + return true; + } + return false; +} + +/* For a given IDENTIFIER_NODE, strip leading and trailing '_' characters + so that we have a canonical form of attribute names. 
*/ + +static inline tree +canonicalize_attr_name (tree attr_name) +{ + size_t l = IDENTIFIER_LENGTH (attr_name); + const char *s = IDENTIFIER_POINTER (attr_name); + + if (canonicalize_attr_name (s, l)) + return get_identifier_with_length (s, l); + + return attr_name; +} + +/* Compare attribute identifiers ATTR1 and ATTR2 with length ATTR1_LEN and + ATTR2_LEN. */ + +static inline bool +cmp_attribs (const char *attr1, size_t attr1_len, + const char *attr2, size_t attr2_len) +{ + return attr1_len == attr2_len && strncmp (attr1, attr2, attr1_len) == 0; +} + +/* Compare attribute identifiers ATTR1 and ATTR2. */ + +static inline bool +cmp_attribs (const char *attr1, const char *attr2) +{ + return cmp_attribs (attr1, strlen (attr1), attr2, strlen (attr2)); +} + +/* Given an identifier node IDENT and a string ATTR_NAME, return true + if the identifier node is a valid attribute name for the string. */ + +static inline bool +is_attribute_p (const char *attr_name, const_tree ident) +{ + return cmp_attribs (attr_name, strlen (attr_name), + IDENTIFIER_POINTER (ident), IDENTIFIER_LENGTH (ident)); +} + +/* Given an attribute name ATTR_NAME and a list of attributes LIST, + return a pointer to the attribute's list element if the attribute + is part of the list, or NULL_TREE if not found. If the attribute + appears more than once, this only returns the first occurrence; the + TREE_CHAIN of the return value should be passed back in if further + occurrences are wanted. ATTR_NAME must be in the form 'text' (not + '__text__'). */ + +static inline tree +lookup_attribute (const char *attr_name, tree list) +{ + if (CHECKING_P && attr_name[0] != '_') + { + size_t attr_len = strlen (attr_name); + gcc_checking_assert (!canonicalize_attr_name (attr_name, attr_len)); + } + /* In most cases, list is NULL_TREE. */ + if (list == NULL_TREE) + return NULL_TREE; + else + { + size_t attr_len = strlen (attr_name); + /* Do the strlen() before calling the out-of-line implementation. 
+ In most cases attr_name is a string constant, and the compiler + will optimize the strlen() away. */ + return private_lookup_attribute (attr_name, attr_len, list); + } +} + +/* Given an attribute name ATTR_NAME and a list of attributes LIST, + return a pointer to the attribute's list first element if the attribute + starts with ATTR_NAME. ATTR_NAME must be in the form 'text' (not + '__text__'). */ + +static inline tree +lookup_attribute_by_prefix (const char *attr_name, tree list) +{ + gcc_checking_assert (attr_name[0] != '_'); + /* In most cases, list is NULL_TREE. */ + if (list == NULL_TREE) + return NULL_TREE; + else + { + size_t attr_len = strlen (attr_name); + while (list) + { + tree name = get_attribute_name (list); + size_t ident_len = IDENTIFIER_LENGTH (name); + + if (attr_len > ident_len) + { + list = TREE_CHAIN (list); + continue; + } + + const char *p = IDENTIFIER_POINTER (name); + gcc_checking_assert (attr_len == 0 || p[0] != '_'); + + if (strncmp (attr_name, p, attr_len) == 0) + break; + + list = TREE_CHAIN (list); + } + + return list; + } +} + +/* Description of a function argument declared with attribute access. + Used as an "iterator" over all such arguments in a function declaration + or call. */ + +struct attr_access +{ + /* The beginning and end of the internal string representation. */ + const char *str, *end; + /* The attribute pointer argument. */ + tree ptr; + /* For a declaration, a TREE_CHAIN of VLA bound expressions stored + in TREE_VALUE and their positions in the argument list (stored + in TREE_PURPOSE). Each expression may be a PARM_DECL or some + other DECL (for ordinary variables), or an EXPR for other + expressions (e.g., funcion calls). */ + tree size; + + /* The zero-based position of each of the formal function arguments. + For the optional SIZARG, UINT_MAX when not specified. For VLAs + with multiple variable bounds, SIZARG is the position corresponding + to the most significant bound in the argument list. 
Positions of + subsequent bounds are in the TREE_PURPOSE field of the SIZE chain. */ + unsigned ptrarg; + unsigned sizarg; + /* For internal specifications only, the constant minimum size of + the array, zero if not specified, and HWI_M1U for the unspecified + VLA [*] notation. Meaningless for external (explicit) access + specifications. */ + unsigned HOST_WIDE_INT minsize; + + /* The access mode. */ + access_mode mode; + + /* Set for an attribute added internally rather than by an explicit + declaration. */ + bool internal_p; + /* Set for the T[static MINSIZE] array notation for nonzero MINSIZE + less than HWI_M1U. */ + bool static_p; + + /* Return the number of specified VLA bounds. */ + unsigned vla_bounds (unsigned *) const; + + /* Return internal representation as STRING_CST. */ + tree to_internal_string () const; + + /* Return the human-readable representation of the external attribute + specification (as it might appear in the source code) as STRING_CST. */ + tree to_external_string () const; + + /* Return argument of array type formatted as a readable string. */ + std::string array_as_string (tree) const; + + /* Return the access mode corresponding to the character code. */ + static access_mode from_mode_char (char); + + /* Reset front end-specific attribute access data from attributes. */ + static void free_lang_data (tree); + + /* The character codes corresponding to all the access modes. */ + static constexpr char mode_chars[5] = { '-', 'r', 'w', 'x', '^' }; + + /* The strings corresponding to just the external access modes. 
*/ + static constexpr char mode_names[4][11] = + { + "none", "read_only", "write_only", "read_write" + }; +}; + +inline access_mode +attr_access::from_mode_char (char c) +{ + switch (c) + { + case mode_chars[access_none]: return access_none; + case mode_chars[access_read_only]: return access_read_only; + case mode_chars[access_write_only]: return access_write_only; + case mode_chars[access_read_write]: return access_read_write; + case mode_chars[access_deferred]: return access_deferred; + } + gcc_unreachable (); +} + +/* Used to define rdwr_map below. */ +struct rdwr_access_hash: int_hash { }; + +/* A mapping between argument number corresponding to attribute access + mode (read_only, write_only, or read_write) and operands. */ +struct attr_access; +typedef hash_map rdwr_map; + +extern void init_attr_rdwr_indices (rdwr_map *, tree); +extern attr_access *get_parm_access (rdwr_map &, tree, + tree = current_function_decl); + +#endif // GCC_ATTRIBS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/auto-host.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/auto-host.h new file mode 100644 index 0000000..1eb39b1 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/auto-host.h @@ -0,0 +1,2735 @@ +/* auto-host.h. Generated from config.in by configure. */ +/* config.in. Generated from configure.ac by autoheader. */ + +/* Define if this compiler should be built as the offload target compiler. */ +#ifndef USED_FOR_TARGET +/* #undef ACCEL_COMPILER */ +#endif + + +/* Define if building universal (internal helper macro) */ +#ifndef USED_FOR_TARGET +/* #undef AC_APPLE_UNIVERSAL_BUILD */ +#endif + + +/* Define to the assembler option to enable compressed debug sections. */ +#ifndef USED_FOR_TARGET +#define AS_COMPRESS_DEBUG_OPTION "--compress-debug-sections" +#endif + + +/* Define to the assembler option to disable compressed debug sections. 
*/ +#ifndef USED_FOR_TARGET +#define AS_NO_COMPRESS_DEBUG_OPTION "--nocompress-debug-sections" +#endif + + +/* Define to the root for URLs about GCC changes. */ +#ifndef USED_FOR_TARGET +#define CHANGES_ROOT_URL "https://gcc.gnu.org/" +#endif + + +/* Define as the number of bits in a byte, if `limits.h' doesn't. */ +#ifndef USED_FOR_TARGET +/* #undef CHAR_BIT */ +#endif + + +/* Define to 0/1 if you want more run-time sanity checks. This one gets a grab + bag of miscellaneous but relatively cheap checks. */ +#ifndef USED_FOR_TARGET +#define CHECKING_P 0 +#endif + + +/* Define 0/1 to force the choice for exception handling model. */ +#ifndef USED_FOR_TARGET +/* #undef CONFIG_SJLJ_EXCEPTIONS */ +#endif + + +/* Define to enable the use of a default assembler. */ +#ifndef USED_FOR_TARGET +/* #undef DEFAULT_ASSEMBLER */ +#endif + + +/* Define to enable the use of a default debug linker. */ +#ifndef USED_FOR_TARGET +/* #undef DEFAULT_DSYMUTIL */ +#endif + + +/* Define to enable the use of a default linker. */ +#ifndef USED_FOR_TARGET +/* #undef DEFAULT_LINKER */ +#endif + + +/* Define to larger than zero set the default stack clash protector size. */ +#ifndef USED_FOR_TARGET +#define DEFAULT_STK_CLASH_GUARD_SIZE 0 +#endif + + +/* Define if you want to use __cxa_atexit, rather than atexit, to register C++ + destructors for local statics and global objects. This is essential for + fully standards-compliant handling of destructors, but requires + __cxa_atexit in libc. */ +#ifndef USED_FOR_TARGET +#define DEFAULT_USE_CXA_ATEXIT 2 +#endif + + +/* The default for -fdiagnostics-color option */ +#ifndef USED_FOR_TARGET +#define DIAGNOSTICS_COLOR_DEFAULT DIAGNOSTICS_COLOR_AUTO +#endif + + +/* The default for -fdiagnostics-urls option */ +#ifndef USED_FOR_TARGET +#define DIAGNOSTICS_URLS_DEFAULT DIAGNOSTICS_URL_AUTO +#endif + + +/* Define to the root for documentation URLs. 
*/ +#ifndef USED_FOR_TARGET +#define DOCUMENTATION_ROOT_URL "https://gcc.gnu.org/onlinedocs/" +#endif + + +/* Define to the dsymutil version. */ +#ifndef USED_FOR_TARGET +/* #undef DSYMUTIL_VERSION */ +#endif + + +/* Define 0/1 if static analyzer feature is enabled. */ +#ifndef USED_FOR_TARGET +#define ENABLE_ANALYZER 1 +#endif + + +/* Define if you want assertions enabled. This is a cheap check. */ +#ifndef USED_FOR_TARGET +#define ENABLE_ASSERT_CHECKING 1 +#endif + + +/* Define to 1 to specify that we are using the BID decimal floating point + format instead of DPD */ +#ifndef USED_FOR_TARGET +#define ENABLE_DECIMAL_BID_FORMAT 1 +#endif + + +/* Define to 1 to enable decimal float extension to C. */ +#ifndef USED_FOR_TARGET +#define ENABLE_DECIMAL_FLOAT 1 +#endif + + +/* Define if your target supports default PIE and it is enabled. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_DEFAULT_PIE */ +#endif + + +/* Define if your target supports default stack protector and it is enabled. + */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_DEFAULT_SSP */ +#endif + + +/* Define if you want more run-time sanity checks for dataflow. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_DF_CHECKING */ +#endif + + +/* Define to 0/1 if you want extra run-time checking that might affect code + generation. */ +#ifndef USED_FOR_TARGET +#define ENABLE_EXTRA_CHECKING 0 +#endif + + +/* Define to 1 to enable fixed-point arithmetic extension to C. */ +#ifndef USED_FOR_TARGET +#define ENABLE_FIXED_POINT 0 +#endif + + +/* Define if you want fold checked that it never destructs its argument. This + is quite expensive. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_FOLD_CHECKING */ +#endif + + +/* Define if you want the garbage collector to operate in maximally paranoid + mode, validating the entire heap and collecting garbage at every + opportunity. This is extremely expensive. 
*/ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_GC_ALWAYS_COLLECT */ +#endif + + +/* Define if you want the garbage collector to do object poisoning and other + memory allocation checks. This is quite expensive. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_GC_CHECKING */ +#endif + + +/* Define if you want operations on GIMPLE (the basic data structure of the + high-level optimizers) to be checked for dynamic type safety at runtime. + This is moderately expensive. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_GIMPLE_CHECKING */ +#endif + + +/* Define if gcc should always pass --build-id to linker. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_LD_BUILDID */ +#endif + + +/* Define to 1 to enable libquadmath support */ +#ifndef USED_FOR_TARGET +#define ENABLE_LIBQUADMATH_SUPPORT 1 +#endif + + +/* Define to enable LTO support. */ +#ifndef USED_FOR_TARGET +#define ENABLE_LTO 1 +#endif + + +/* Define to 1 if translation of program messages to the user's native + language is requested. */ +#ifndef USED_FOR_TARGET +#define ENABLE_NLS 1 +#endif + + +/* Define this to enable support for offloading. */ +#ifndef USED_FOR_TARGET +#define ENABLE_OFFLOADING 0 +#endif + + +/* Define to enable plugin support. */ +#ifndef USED_FOR_TARGET +#define ENABLE_PLUGIN 1 +#endif + + +/* Define if you want all operations on RTL (the basic data structure of the + optimizer and back end) to be checked for dynamic type safety at runtime. + This is quite expensive. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_RTL_CHECKING */ +#endif + + +/* Define if you want RTL flag accesses to be checked against the RTL codes + that are supported for each access macro. This is relatively cheap. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_RTL_FLAG_CHECKING */ +#endif + + +/* Define if you want runtime assertions enabled. This is a cheap check. */ +#define ENABLE_RUNTIME_CHECKING 1 + +/* Define to enable evaluating float expressions with double precision in + standards-compatible mode on s390 targets. 
*/ +/* #undef ENABLE_S390_EXCESS_FLOAT_PRECISION */ + +/* Define if the -stdlib= option should be enabled. */ +#ifndef USED_FOR_TARGET +#define ENABLE_STDLIB_OPTION 0 +#endif + + +/* Define if you want all operations on trees (the basic data structure of the + front ends) to be checked for dynamic type safety at runtime. This is + moderately expensive. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_TREE_CHECKING */ +#endif + + +/* Define if you want all gimple types to be verified after gimplifiation. + This is cheap. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_TYPES_CHECKING */ +#endif + + +/* Define to get calls to the valgrind runtime enabled. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_VALGRIND_ANNOTATIONS */ +#endif + + +/* Define if you want to run subprograms and generated programs through + valgrind (a memory checker). This is extremely expensive. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_VALGRIND_CHECKING */ +#endif + + +/* Define 0/1 if vtable verification feature is enabled. */ +#ifndef USED_FOR_TARGET +#define ENABLE_VTABLE_VERIFY 0 +#endif + + +/* Define to 1 if installation paths should be looked up in the Windows + Registry. Ignored on non-Windows hosts. */ +#ifndef USED_FOR_TARGET +/* #undef ENABLE_WIN32_REGISTRY */ +#endif + + +/* Define to the name of a file containing a list of extra machine modes for + this architecture. */ +#ifndef USED_FOR_TARGET +#define EXTRA_MODES_FILE "config/i386/i386-modes.def" +#endif + + +/* Define to enable detailed memory allocation stats gathering. */ +#ifndef USED_FOR_TARGET +#define GATHER_STATISTICS 0 +#endif + + +/* Define to 1 if `TIOCGWINSZ' requires . */ +#ifndef USED_FOR_TARGET +#define GWINSZ_IN_SYS_IOCTL 1 +#endif + + +/* mcontext_t fields start with __ */ +#ifndef USED_FOR_TARGET +/* #undef HAS_MCONTEXT_T_UNDERSCORES */ +#endif + + +/* Define if AF_INET6 supported. */ +#ifndef USED_FOR_TARGET +#define HAVE_AF_INET6 1 +#endif + + +/* Define if AF_UNIX supported. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_AF_UNIX 1 +#endif + + +/* Define if your assembler supports architecture modifiers. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_ARCHITECTURE_MODIFIERS */ +#endif + + +/* Define if your avr assembler supports -mgcc-isr option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_AVR_MGCCISR_OPTION */ +#endif + + +/* Define if your avr assembler supports --mlink-relax option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_AVR_MLINK_RELAX_OPTION */ +#endif + + +/* Define if your avr assembler supports -mrmw option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_AVR_MRMW_OPTION */ +#endif + + +/* Define to the level of your assembler's compressed debug section support. + */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_COMPRESS_DEBUG 2 +#endif + + +/* Define if your assembler supports the --debug-prefix-map option. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_DEBUG_PREFIX_MAP 1 +#endif + + +/* Define if your assembler supports .module. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_DOT_MODULE */ +#endif + + +/* Define if your assembler supports DSPR1 mult. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_DSPR1_MULT */ +#endif + + +/* Define if your assembler supports .dtprelword. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_DTPRELWORD */ +#endif + + +/* Define if your assembler supports dwarf2 .file/.loc directives, and + preserves file table indices exactly as given. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_DWARF2_DEBUG_LINE 1 +#endif + + +/* Define if your assembler supports views in dwarf2 .loc directives. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_DWARF2_DEBUG_VIEW 1 +#endif + + +/* Define if your assembler supports the R_PPC64_ENTRY relocation. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_ENTRY_MARKERS */ +#endif + + +/* Define if your assembler supports explicit relocations. 
*/ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_EXPLICIT_RELOCS */ +#endif + + +/* Define if your assembler supports FMAF, HPC, and VIS 3.0 instructions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_FMAF_HPC_VIS3 */ +#endif + + +/* Define if your assembler supports the --gdwarf2 option. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_GDWARF2_DEBUG_FLAG 1 +#endif + + +/* Define if your assembler supports the --gdwarf-5 option. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_GDWARF_5_DEBUG_FLAG 1 +#endif + + +/* Define if your assembler supports .gnu_attribute. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_GNU_ATTRIBUTE */ +#endif + + +/* Define true if the assembler supports '.long foo@GOTOFF'. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_GOTOFF_IN_DATA 1 +#endif + + +/* Define if your assembler supports the --gstabs option. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_GSTABS_DEBUG_FLAG 1 +#endif + + +/* Define if your assembler supports the Sun syntax for cmov. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_IX86_CMOV_SUN_SYNTAX */ +#endif + + +/* Define if your assembler supports the subtraction of symbols in different + sections. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_DIFF_SECT_DELTA 1 +#endif + + +/* Define if your assembler supports the ffreep mnemonic. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_FFREEP 1 +#endif + + +/* Define if your assembler uses fildq and fistq mnemonics. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_FILDQ 1 +#endif + + +/* Define if your assembler uses filds and fists mnemonics. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_FILDS 1 +#endif + + +/* Define 0/1 if your assembler and linker support @GOT. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_GOT32X 1 +#endif + + +/* Define if your assembler supports HLE prefixes. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_HLE 1 +#endif + + +/* Define if your assembler supports interunit movq mnemonic. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_INTERUNIT_MOVQ 1 +#endif + + +/* Define if your assembler supports the .quad directive. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_QUAD 1 +#endif + + +/* Define if the assembler supports 'rep , lock '. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_REP_LOCK_PREFIX 1 +#endif + + +/* Define if your assembler supports the sahf mnemonic in 64bit mode. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_SAHF 1 +#endif + + +/* Define if your assembler supports the swap suffix. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_SWAP 1 +#endif + + +/* Define if your assembler and linker support @tlsgdplt. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_IX86_TLSGDPLT */ +#endif + + +/* Define to 1 if your assembler and linker support @tlsldm. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_TLSLDM 0 +#endif + + +/* Define to 1 if your assembler and linker support @tlsldmplt. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_TLSLDMPLT 0 +#endif + + +/* Define 0/1 if your assembler and linker support calling ___tls_get_addr via + GOT. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_TLS_GET_ADDR_GOT 1 +#endif + + +/* Define if your assembler supports the 'ud2' mnemonic. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_IX86_UD2 1 +#endif + + +/* Define if your assembler supports the lituse_jsrdirect relocation. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_JSRDIRECT_RELOCS */ +#endif + + +/* Define if your assembler supports .sleb128 and .uleb128. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_LEB128 1 +#endif + + +/* Define if your assembler supports LEON instructions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_LEON */ +#endif + + +/* Define if the assembler won't complain about a line such as # 0 "" 2. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_LINE_ZERO 1 +#endif + + +/* Define if your assembler supports ltoffx and ldxmov relocations. 
*/ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_LTOFFX_LDXMOV_RELOCS */ +#endif + + +/* Define if your assembler supports the -mabi option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MABI_OPTION */ +#endif + + +/* Define if your assembler supports .machine and .machinemode. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MACHINE_MACHINEMODE */ +#endif + + +/* Define if the assembler understands -march=rv*_zifencei. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MARCH_ZIFENCEI */ +#endif + + +/* Define if your assembler supports mfcr field. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MFCRF */ +#endif + + +/* Define if the assembler understands -misa-spec=. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MISA_SPEC */ +#endif + + +/* Define if your Mac OS X assembler supports -mllvm -x86-pad-for-align=false. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MLLVM_X86_PAD_FOR_ALIGN */ +#endif + + +/* Define if your Mac OS X assembler supports the -mmacos-version-min option. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MMACOSX_VERSION_MIN_OPTION */ +#endif + + +/* Define if your assembler supports .mspabi_attribute. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_MSPABI_ATTRIBUTE */ +#endif + + +/* Define if the assembler understands -mnan=. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_NAN */ +#endif + + +/* Define if your assembler supports %gotoff relocation syntax. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_NIOS2_GOTOFF_RELOCATION */ +#endif + + +/* Define if your assembler supports the -no-mul-bug-abort option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_NO_MUL_BUG_ABORT_OPTION */ +#endif + + +/* Define if the assembler understands -mno-shared. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_NO_SHARED */ +#endif + + +/* Define if your assembler supports offsetable %lo(). */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_OFFSETABLE_LO10 */ +#endif + + +/* Define if your assembler supports R_PPC*_PLTSEQ relocations. 
*/ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_PLTSEQ */ +#endif + + +/* Define if your assembler supports .ref */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_REF */ +#endif + + +/* Define if your assembler supports R_PPC_REL16 relocs. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_REL16 */ +#endif + + +/* Define if your assembler supports -relax option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_RELAX_OPTION */ +#endif + + +/* Define if your assembler supports .attribute. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_RISCV_ATTRIBUTE */ +#endif + + +/* Define if your assembler supports relocs needed by -fpic. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SMALL_PIC_RELOCS */ +#endif + + +/* Define if your assembler supports SPARC4 instructions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SPARC4 */ +#endif + + +/* Define if your assembler supports SPARC5 and VIS 4.0 instructions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SPARC5_VIS4 */ +#endif + + +/* Define if your assembler supports SPARC6 instructions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SPARC6 */ +#endif + + +/* Define if your assembler and linker support GOTDATA_OP relocs. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SPARC_GOTDATA_OP */ +#endif + + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SPARC_UA_PCREL */ +#endif + + +/* Define if your assembler and linker support unaligned PC relative relocs + against hidden symbols. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_SPARC_UA_PCREL_HIDDEN */ +#endif + + +/* Define if your assembler supports .stabs. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_STABS_DIRECTIVE 1 +#endif + + +/* Define if your assembler and linker support thread-local storage. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_TLS 1 +#endif + + +/* Define if your assembler supports vl/vst/vlm/vstm with an optional + alignment hint argument. 
*/ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_VECTOR_LOADSTORE_ALIGNMENT_HINTS */ +#endif + + +/* Define if your assembler supports vl/vst/vlm/vstm with an optional + alignment hint argument on z13. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_VECTOR_LOADSTORE_ALIGNMENT_HINTS_ON_Z13 */ +#endif + + +/* Define if your assembler supports VSX instructions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_VSX */ +#endif + + +/* Define if your assembler supports --gdwarf-4/--gdwarf-5 even with compiler + generated .debug_line. */ +#ifndef USED_FOR_TARGET +#define HAVE_AS_WORKING_DWARF_N_FLAG 1 +#endif + + +/* Define if your assembler supports -xbrace_comment option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_AS_XBRACE_COMMENT_OPTION */ +#endif + + +/* Define to 1 if you have the `atoq' function. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_ATOQ */ +#endif + + +/* Define to 1 if you have the `clearerr_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_CLEARERR_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `clock' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_CLOCK 1 +#endif + + +/* Define if defines clock_t. */ +#ifndef USED_FOR_TARGET +#define HAVE_CLOCK_T 1 +#endif + + +/* Define 0/1 if your assembler and linker support COMDAT groups. */ +#ifndef USED_FOR_TARGET +#define HAVE_COMDAT_GROUP 1 +#endif + + +/* Define to 1 if we found a declaration for 'abort', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_ABORT 1 +#endif + + +/* Define to 1 if we found a declaration for 'asprintf', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_ASPRINTF 1 +#endif + + +/* Define to 1 if we found a declaration for 'atof', otherwise define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_ATOF 1 +#endif + + +/* Define to 1 if we found a declaration for 'atol', otherwise define to 0. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_ATOL 1 +#endif + + +/* Define to 1 if we found a declaration for 'atoll', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_ATOLL 1 +#endif + + +/* Define to 1 if you have the declaration of `basename(const char*)', and to + 0 if you don't. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_BASENAME 1 +#endif + + +/* Define to 1 if we found a declaration for 'calloc', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_CALLOC 1 +#endif + + +/* Define to 1 if we found a declaration for 'clearerr_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_CLEARERR_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'clock', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_CLOCK 1 +#endif + + +/* Define to 1 if we found a declaration for 'errno', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_ERRNO 1 +#endif + + +/* Define to 1 if we found a declaration for 'feof_unlocked', otherwise define + to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FEOF_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'ferror_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FERROR_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'fflush_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FFLUSH_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'ffs', otherwise define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FFS 1 +#endif + + +/* Define to 1 if we found a declaration for 'fgetc_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FGETC_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'fgets_unlocked', otherwise + define to 0. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FGETS_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'fileno_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FILENO_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'fprintf_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FPRINTF_UNLOCKED 0 +#endif + + +/* Define to 1 if we found a declaration for 'fputc_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FPUTC_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'fputs_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FPUTS_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'fread_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FREAD_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'free', otherwise define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FREE 1 +#endif + + +/* Define to 1 if we found a declaration for 'fwrite_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_FWRITE_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'getchar_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETCHAR_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'getcwd', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETCWD 1 +#endif + + +/* Define to 1 if we found a declaration for 'getc_unlocked', otherwise define + to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETC_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'getenv', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETENV 1 +#endif + + +/* Define to 1 if we found a declaration for 'getopt', otherwise define to 0. 
+ */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETOPT 1 +#endif + + +/* Define to 1 if we found a declaration for 'getpagesize', otherwise define + to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETPAGESIZE 1 +#endif + + +/* Define to 1 if we found a declaration for 'getrlimit', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETRLIMIT 1 +#endif + + +/* Define to 1 if we found a declaration for 'getrusage', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETRUSAGE 1 +#endif + + +/* Define to 1 if we found a declaration for 'getwd', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_GETWD 1 +#endif + + +/* Define to 1 if we found a declaration for 'ldgetname', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_LDGETNAME 0 +#endif + + +/* Define to 1 if we found a declaration for 'madvise', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_MADVISE 1 +#endif + + +/* Define to 1 if we found a declaration for 'mallinfo', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_MALLINFO 1 +#endif + + +/* Define to 1 if we found a declaration for 'mallinfo2', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_MALLINFO2 1 +#endif + + +/* Define to 1 if we found a declaration for 'malloc', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_MALLOC 1 +#endif + + +/* Define to 1 if we found a declaration for 'putchar_unlocked', otherwise + define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_PUTCHAR_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'putc_unlocked', otherwise define + to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_PUTC_UNLOCKED 1 +#endif + + +/* Define to 1 if we found a declaration for 'realloc', otherwise define to 0. 
+ */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_REALLOC 1 +#endif + + +/* Define to 1 if we found a declaration for 'sbrk', otherwise define to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_SBRK 1 +#endif + + +/* Define to 1 if we found a declaration for 'setenv', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_SETENV 1 +#endif + + +/* Define to 1 if we found a declaration for 'setrlimit', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_SETRLIMIT 1 +#endif + + +/* Define to 1 if we found a declaration for 'sigaltstack', otherwise define + to 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_SIGALTSTACK 1 +#endif + + +/* Define to 1 if we found a declaration for 'snprintf', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_SNPRINTF 1 +#endif + + +/* Define to 1 if we found a declaration for 'stpcpy', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STPCPY 1 +#endif + + +/* Define to 1 if we found a declaration for 'strnlen', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRNLEN 1 +#endif + + +/* Define to 1 if we found a declaration for 'strsignal', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRSIGNAL 1 +#endif + + +/* Define to 1 if you have the declaration of `strstr(const char*,const + char*)', and to 0 if you don't. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRSTR 1 +#endif + + +/* Define to 1 if we found a declaration for 'strtol', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRTOL 1 +#endif + + +/* Define to 1 if we found a declaration for 'strtoll', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRTOLL 1 +#endif + + +/* Define to 1 if we found a declaration for 'strtoul', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRTOUL 1 +#endif + + +/* Define to 1 if we found a declaration for 'strtoull', otherwise define to + 0. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRTOULL 1 +#endif + + +/* Define to 1 if we found a declaration for 'strverscmp', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_STRVERSCMP 1 +#endif + + +/* Define to 1 if we found a declaration for 'times', otherwise define to 0. + */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_TIMES 1 +#endif + + +/* Define to 1 if we found a declaration for 'unsetenv', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_UNSETENV 1 +#endif + + +/* Define to 1 if we found a declaration for 'vasprintf', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_VASPRINTF 1 +#endif + + +/* Define to 1 if we found a declaration for 'vsnprintf', otherwise define to + 0. */ +#ifndef USED_FOR_TARGET +#define HAVE_DECL_VSNPRINTF 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_DIRECT_H */ +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_DLFCN_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_EXT_HASH_MAP 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_FCNTL_H 1 +#endif + + +/* Define to 1 if you have the `feof_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FEOF_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `ferror_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FERROR_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fflush_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FFLUSH_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fgetc_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FGETC_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fgets_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FGETS_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fileno_unlocked' function. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_FILENO_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fork' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FORK 1 +#endif + + +/* Define to 1 if you have the `fprintf_unlocked' function. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_FPRINTF_UNLOCKED */ +#endif + + +/* Define to 1 if you have the `fputc_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FPUTC_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fputs_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FPUTS_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fread_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FREAD_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `fstatat' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FSTATAT 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_FTW_H 1 +#endif + + +/* Define to 1 if you have the `fwrite_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_FWRITE_UNLOCKED 1 +#endif + + +/* Define if your assembler supports specifying the alignment of objects + allocated using the GAS .comm command. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_ALIGNED_COMM */ +#endif + + +/* Define if your Arm assembler permits context-specific feature extensions. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_ARM_EXTENDED_ARCH */ +#endif + + +/* Define if your assembler supports .balign and .p2align. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_BALIGN_AND_P2ALIGN 1 +#endif + + +/* Define 0/1 if your assembler supports CFI directives. */ +#define HAVE_GAS_CFI_DIRECTIVE 1 + +/* Define 0/1 if your assembler supports .cfi_personality. */ +#define HAVE_GAS_CFI_PERSONALITY_DIRECTIVE 1 + +/* Define 0/1 if your assembler supports .cfi_sections. */ +#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 1 + +/* Define if your assembler supports the .loc discriminator sub-directive. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_DISCRIMINATOR 1 +#endif + + +/* Define if your assembler supports @gnu_unique_object. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_GNU_UNIQUE_OBJECT */ +#endif + + +/* Define if your assembler and linker support .hidden. */ +#define HAVE_GAS_HIDDEN 1 + +/* Define if your assembler supports .lcomm with an alignment field. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_LCOMM_WITH_ALIGNMENT */ +#endif + + +/* Define if your assembler supports .literal16. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_LITERAL16 */ +#endif + + +/* Define if your assembler supports the .loc is_stmt sub-directive. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_LOC_STMT 1 +#endif + + +/* Define if your assembler supports specifying the maximum number of bytes to + skip when using the GAS .p2align command. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_MAX_SKIP_P2ALIGN 1 +#endif + + +/* Define if your assembler supports the .set micromips directive */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_MICROMIPS */ +#endif + + +/* Define if your assembler supports .nsubspa comdat option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_NSUBSPA_COMDAT */ +#endif + + +/* Define if your assembler and linker support 32-bit section relative relocs + via '.secrel32 label'. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GAS_PE_SECREL32_RELOC */ +#endif + + +/* Define if your assembler supports specifying the exclude section flag. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_SECTION_EXCLUDE 1 +#endif + + +/* Define 0/1 if your assembler supports 'o' flag in .section directive. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_SECTION_LINK_ORDER 1 +#endif + + +/* Define 0/1 if your assembler supports marking sections with SHF_GNU_RETAIN + flag. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_SHF_GNU_RETAIN 1 +#endif + + +/* Define 0/1 if your assembler supports marking sections with SHF_MERGE flag. 
+ */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_SHF_MERGE 1 +#endif + + +/* Define if your assembler supports .subsection and .subsection -1 starts + emitting at the beginning of your section. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_SUBSECTION_ORDERING 1 +#endif + + +/* Define if your assembler supports .weak. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_WEAK 1 +#endif + + +/* Define if your assembler supports .weakref. */ +#ifndef USED_FOR_TARGET +#define HAVE_GAS_WEAKREF 1 +#endif + + +/* Define if your assembler has fixed global_load functions. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_ASM_GLOBAL_LOAD_FIXED */ +#endif + + +/* Define if your assembler expects amdgcn_target gfx908+xnack syntax. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_ASM_V3_SYNTAX */ +#endif + + +/* Define if your assembler expects amdgcn_target gfx908:xnack+ syntax. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_ASM_V4_SYNTAX */ +#endif + + +/* Define if your assembler allows -mattr=+sramecc for fiji. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_SRAM_ECC_FIJI */ +#endif + + +/* Define if your assembler allows -mattr=+sramecc for gfx900. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_SRAM_ECC_GFX900 */ +#endif + + +/* Define if your assembler allows -mattr=+sramecc for gfx906. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_SRAM_ECC_GFX906 */ +#endif + + +/* Define if your assembler allows -mattr=+sramecc for gfx908. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_SRAM_ECC_GFX908 */ +#endif + + +/* Define if your assembler allows -mattr=+xnack for fiji. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_XNACK_FIJI */ +#endif + + +/* Define if your assembler allows -mattr=+xnack for gfx900. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_XNACK_GFX900 */ +#endif + + +/* Define if your assembler allows -mattr=+xnack for gfx906. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_XNACK_GFX906 */ +#endif + + +/* Define if your assembler allows -mattr=+xnack for gfx908. 
*/ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GCN_XNACK_GFX908 */ +#endif + + +/* Define to 1 if you have the `getchar_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_GETCHAR_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `getc_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_GETC_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `getrlimit' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_GETRLIMIT 1 +#endif + + +/* Define to 1 if you have the `getrusage' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_GETRUSAGE 1 +#endif + + +/* Define to 1 if you have the `gettimeofday' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_GETTIMEOFDAY 1 +#endif + + +/* Define to 1 if using GNU as. */ +#ifndef USED_FOR_TARGET +#define HAVE_GNU_AS 1 +#endif + + +/* Define if your system supports gnu indirect functions. */ +#ifndef USED_FOR_TARGET +#define HAVE_GNU_INDIRECT_FUNCTION 0 +#endif + + +/* Define to 1 if using GNU ld. */ +#ifndef USED_FOR_TARGET +#define HAVE_GNU_LD 1 +#endif + + +/* Define if the gold linker supports split stack and is available as a + non-default */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_GOLD_NON_DEFAULT_SPLIT_STACK */ +#endif + + +/* Define if you have the iconv() function. */ +#ifndef USED_FOR_TARGET +#define HAVE_ICONV 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_ICONV_H 1 +#endif + + +/* Define 0/1 if .init_array/.fini_array sections are available and working. + */ +#ifndef USED_FOR_TARGET +#define HAVE_INITFINI_ARRAY_SUPPORT 1 +#endif + + +/* Define to 1 if the system has the type `intmax_t'. */ +#ifndef USED_FOR_TARGET +#define HAVE_INTMAX_T 1 +#endif + + +/* Define to 1 if the system has the type `intptr_t'. */ +#ifndef USED_FOR_TARGET +#define HAVE_INTPTR_T 1 +#endif + + +/* Define if you have a working header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_INTTYPES_H 1 +#endif + + +/* Define to 1 if you have the `kill' function. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_KILL 1 +#endif + + +/* Define if you have and nl_langinfo(CODESET). */ +#ifndef USED_FOR_TARGET +#define HAVE_LANGINFO_CODESET 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_LANGINFO_H 1 +#endif + + +/* Define if your file defines LC_MESSAGES. */ +#ifndef USED_FOR_TARGET +#define HAVE_LC_MESSAGES 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LDFCN_H */ +#endif + + +/* Define 0/1 if your linker supports the SHF_MERGE flag with section + alignment > 1. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_ALIGNED_SHF_MERGE 1 +#endif + + +/* Define if your linker supports --as-needed/--no-as-needed or equivalent + options. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_AS_NEEDED 1 +#endif + + +/* Define if your default avr linker script for avrxmega3 leaves .rodata in + flash. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_AVR_AVRXMEGA3_RODATA_IN_FLASH */ +#endif + + +/* Define if your linker supports -z bndplt */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_BNDPLT_SUPPORT 1 +#endif + + +/* Define if the PE linker has broken DWARF 5 support. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_BROKEN_PE_DWARF5 */ +#endif + + +/* Define if your linker supports --build-id. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_BUILDID 1 +#endif + + +/* Define if the linker supports clearing hardware capabilities via mapfile. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_CLEARCAP */ +#endif + + +/* Define to the level of your linker's compressed debug section support. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_COMPRESS_DEBUG 3 +#endif + + +/* Define if your linker supports --demangle option. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_DEMANGLE 1 +#endif + + +/* Define 0/1 if your linker supports CIE v3 in .eh_frame. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_EH_FRAME_CIEV3 1 +#endif + + +/* Define if your linker supports .eh_frame_hdr. 
*/ +#define HAVE_LD_EH_FRAME_HDR 1 + +/* Define if your linker supports garbage collection of sections in presence + of EH frames. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_EH_GC_SECTIONS 1 +#endif + + +/* Define if your linker has buggy garbage collection of sections support when + .text.startup.foo like sections are used. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_EH_GC_SECTIONS_BUG */ +#endif + + +/* Define if your PowerPC64 linker supports a large TOC. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_LARGE_TOC */ +#endif + + +/* Define if your PowerPC64 linker only needs function descriptor syms. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_NO_DOT_SYMS */ +#endif + + +/* Define if your linker can relax absolute .eh_frame personality pointers + into PC-relative form. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_PERSONALITY_RELAXATION */ +#endif + + +/* Define if the PE linker supports --disable-dynamicbase option. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_PE_DISABLE_DYNAMICBASE */ +#endif + + +/* Define if your linker supports PIE option. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_PIE 1 +#endif + + +/* Define 0/1 if your linker supports -pie option with copy reloc. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_PIE_COPYRELOC 1 +#endif + + +/* Define if your PowerPC linker has .gnu.attributes long double support. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE */ +#endif + + +/* Define if your linker supports --push-state/--pop-state */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_PUSHPOPSTATE_SUPPORT 1 +#endif + + +/* Define if your linker links a mix of read-only and read-write sections into + a read-write section. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_RO_RW_SECTION_MIXING 1 +#endif + + +/* Define if your linker supports the *_sol2 emulations. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_LD_SOL2_EMULATION */ +#endif + + +/* Define if your linker supports -Bstatic/-Bdynamic or equivalent options. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_LD_STATIC_DYNAMIC 1 +#endif + + +/* Define if your linker supports --sysroot. */ +#ifndef USED_FOR_TARGET +#define HAVE_LD_SYSROOT 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_LIMITS_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_LOCALE_H 1 +#endif + + +/* Define to 1 if the system has the type `long long'. */ +#ifndef USED_FOR_TARGET +#define HAVE_LONG_LONG 1 +#endif + + +/* Define to 1 if the system has the type `long long int'. */ +#ifndef USED_FOR_TARGET +#define HAVE_LONG_LONG_INT 1 +#endif + + +/* Define to the level of your linker's plugin support. */ +#ifndef USED_FOR_TARGET +#define HAVE_LTO_PLUGIN 2 +#endif + + +/* Define to 1 if you have the `madvise' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_MADVISE 1 +#endif + + +/* Define to 1 if you have the `mallinfo' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_MALLINFO 1 +#endif + + +/* Define to 1 if you have the `mallinfo2' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_MALLINFO2 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_MALLOC_H 1 +#endif + + +/* Define to 1 if you have the `mbstowcs' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_MBSTOWCS 1 +#endif + + +/* Define if valgrind's memcheck.h header is installed. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_MEMCHECK_H */ +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_MEMORY_H 1 +#endif + + +/* Define to 1 if you have the `mmap' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_MMAP 1 +#endif + + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#ifndef USED_FOR_TARGET +#define HAVE_MMAP_ANON 1 +#endif + + +/* Define if mmap of /dev/zero works. */ +#ifndef USED_FOR_TARGET +#define HAVE_MMAP_DEV_ZERO 1 +#endif + + +/* Define if read-only mmap of a plain file works. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_MMAP_FILE 1 +#endif + + +/* Define if GCC has been configured with --enable-newlib-nano-formatted-io. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_NEWLIB_NANO_FORMATTED_IO */ +#endif + + +/* Define to 1 if you have the `nl_langinfo' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_NL_LANGINFO 1 +#endif + + +/* Define to 1 if you have the `popen' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_POPEN 1 +#endif + + +/* Define to 1 if you have the `posix_fallocate' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_POSIX_FALLOCATE 1 +#endif + + +/* Define to 1 if you have the `putchar_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_PUTCHAR_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `putc_unlocked' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_PUTC_UNLOCKED 1 +#endif + + +/* Define to 1 if you have the `setlocale' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_SETLOCALE 1 +#endif + + +/* Define to 1 if you have the `setrlimit' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_SETRLIMIT 1 +#endif + + +/* Define if defines sighandler_t */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_SIGHANDLER_T */ +#endif + + +/* Define if the system-provided CRTs are present on Solaris. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_SOLARIS_CRTS */ +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_STDDEF_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_STDINT_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_STDLIB_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_STRINGS_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_STRING_H 1 +#endif + + +/* Define to 1 if you have the `strsignal' function. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_STRSIGNAL 1 +#endif + + +/* Define if defines struct tms. */ +#ifndef USED_FOR_TARGET +#define HAVE_STRUCT_TMS 1 +#endif + + +/* Define if defines std::swap. */ +#ifndef USED_FOR_TARGET +#define HAVE_SWAP_IN_UTILITY 1 +#endif + + +/* Define to 1 if you have the `sysconf' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYSCONF 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_FILE_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_SYS_LOCKING_H */ +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_MMAN_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_PARAM_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_RESOURCE_H 1 +#endif + + +/* Define if your target C library provides sys/sdt.h */ +/* #undef HAVE_SYS_SDT_H */ + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_STAT_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_TIMES_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_TIME_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_TYPES_H 1 +#endif + + +/* Define to 1 if you have that is POSIX.1 compatible. */ +#ifndef USED_FOR_TARGET +#define HAVE_SYS_WAIT_H 1 +#endif + + +/* Define to 1 if you have the `times' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_TIMES 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_TIME_H 1 +#endif + + +/* Define to 1 if you have the header file. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_TR1_UNORDERED_MAP 1 +#endif + + +/* Define to 1 if the system has the type `uintmax_t'. */ +#ifndef USED_FOR_TARGET +#define HAVE_UINTMAX_T 1 +#endif + + +/* Define to 1 if the system has the type `uintptr_t'. */ +#ifndef USED_FOR_TARGET +#define HAVE_UINTPTR_T 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_UNISTD_H 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_UNORDERED_MAP 1 +#endif + + +/* Define to 1 if the system has the type `unsigned long long int'. */ +#ifndef USED_FOR_TARGET +#define HAVE_UNSIGNED_LONG_LONG_INT 1 +#endif + + +/* Define if valgrind's valgrind/memcheck.h header is installed. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_VALGRIND_MEMCHECK_H */ +#endif + + +/* Define to 1 if you have the `vfork' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_VFORK 1 +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_VFORK_H */ +#endif + + +/* Define to 1 if you have the header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_WCHAR_H 1 +#endif + + +/* Define to 1 if you have the `wcswidth' function. */ +#ifndef USED_FOR_TARGET +#define HAVE_WCSWIDTH 1 +#endif + + +/* Define to 1 if `fork' works. */ +#ifndef USED_FOR_TARGET +#define HAVE_WORKING_FORK 1 +#endif + + +/* Define this macro if mbstowcs does not crash when its first argument is + NULL. */ +#ifndef USED_FOR_TARGET +#define HAVE_WORKING_MBSTOWCS 1 +#endif + + +/* Define to 1 if `vfork' works. */ +#ifndef USED_FOR_TARGET +#define HAVE_WORKING_VFORK 1 +#endif + + +/* Define if your assembler supports AIX debug frame section label reference. + */ +#ifndef USED_FOR_TARGET +/* #undef HAVE_XCOFF_DWARF_EXTRAS */ +#endif + + +/* Define if you have a working header file. */ +#ifndef USED_FOR_TARGET +#define HAVE_ZSTD_H 1 +#endif + + +/* Define if isl is in use. 
*/ +#ifndef USED_FOR_TARGET +#define HAVE_isl 1 +#endif + + +/* Define if F_SETLKW supported by fcntl. */ +#ifndef USED_FOR_TARGET +#define HOST_HAS_F_SETLKW 1 +#endif + + +/* Define if _LK_LOC supported by _locking. */ +#ifndef USED_FOR_TARGET +/* #undef HOST_HAS_LK_LOCK */ +#endif + + +/* Define if O_CLOEXEC supported by fcntl. */ +#ifndef USED_FOR_TARGET +#define HOST_HAS_O_CLOEXEC 1 +#endif + + +/* Define as const if the declaration of iconv() needs const. */ +#ifndef USED_FOR_TARGET +#define ICONV_CONST +#endif + + +/* Define if int64_t uses long as underlying type. */ +#ifndef USED_FOR_TARGET +#define INT64_T_IS_LONG 1 +#endif + + +/* Define to 1 if ld64 supports '-export_dynamic'. */ +#ifndef USED_FOR_TARGET +/* #undef LD64_HAS_EXPORT_DYNAMIC */ +#endif + + +/* Define to 1 if ld64 supports '-platform_version'. */ +#ifndef USED_FOR_TARGET +/* #undef LD64_HAS_PLATFORM_VERSION */ +#endif + + +/* Define to ld64 version. */ +#ifndef USED_FOR_TARGET +/* #undef LD64_VERSION */ +#endif + + +/* Define to the linker option to ignore unused dependencies. */ +#ifndef USED_FOR_TARGET +#define LD_AS_NEEDED_OPTION "--push-state --as-needed" +#endif + + +/* Define to the linker option to enable compressed debug sections. */ +#ifndef USED_FOR_TARGET +#define LD_COMPRESS_DEBUG_OPTION "--compress-debug-sections" +#endif + + +/* Define to the linker option to enable use of shared objects. */ +#ifndef USED_FOR_TARGET +#define LD_DYNAMIC_OPTION "-Bdynamic" +#endif + + +/* Define to the linker option to keep unused dependencies. */ +#ifndef USED_FOR_TARGET +#define LD_NO_AS_NEEDED_OPTION "--pop-state" +#endif + + +/* Define to the linker option to disable use of shared objects. */ +#ifndef USED_FOR_TARGET +#define LD_STATIC_OPTION "-Bstatic" +#endif + + +/* The linker hash style */ +#ifndef USED_FOR_TARGET +/* #undef LINKER_HASH_STYLE */ +#endif + + +/* Define to the name of the LTO plugin DSO that must be passed to the + linker's -plugin=LIB option. 
*/ +#ifndef USED_FOR_TARGET +#define LTOPLUGINSONAME "liblto_plugin.so" +#endif + + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#ifndef USED_FOR_TARGET +#define LT_OBJDIR ".libs/" +#endif + + +/* Define if we should link mingw executables with --large-address-aware */ +#ifndef USED_FOR_TARGET +/* #undef MINGW_DEFAULT_LARGE_ADDR_AWARE */ +#endif + + +/* Value to set mingw's _dowildcard to. */ +#ifndef USED_FOR_TARGET +/* #undef MINGW_DOWILDCARD */ +#endif + + +/* Define if host mkdir takes a single argument. */ +#ifndef USED_FOR_TARGET +/* #undef MKDIR_TAKES_ONE_ARG */ +#endif + + +/* Define to 1 to if -foffload is defaulted */ +#ifndef USED_FOR_TARGET +/* #undef OFFLOAD_DEFAULTED */ +#endif + + +/* Define to offload targets, separated by commas. */ +#ifndef USED_FOR_TARGET +#define OFFLOAD_TARGETS "" +#endif + + +/* Define to the address where bug reports for this package should be sent. */ +#ifndef USED_FOR_TARGET +#define PACKAGE_BUGREPORT "" +#endif + + +/* Define to the full name of this package. */ +#ifndef USED_FOR_TARGET +#define PACKAGE_NAME "" +#endif + + +/* Define to the full name and version of this package. */ +#ifndef USED_FOR_TARGET +#define PACKAGE_STRING "" +#endif + + +/* Define to the one symbol short name of this package. */ +#ifndef USED_FOR_TARGET +#define PACKAGE_TARNAME "" +#endif + + +/* Define to the home page for this package. */ +#ifndef USED_FOR_TARGET +#define PACKAGE_URL "" +#endif + + +/* Define to the version of this package. */ +#ifndef USED_FOR_TARGET +#define PACKAGE_VERSION "" +#endif + + +/* Specify plugin linker */ +#ifndef USED_FOR_TARGET +#define PLUGIN_LD_SUFFIX "ld" +#endif + + +/* Define to .TOC. alignment forced by your linker. */ +#ifndef USED_FOR_TARGET +/* #undef POWERPC64_TOC_POINTER_ALIGNMENT */ +#endif + + +/* Define to PREFIX/include if cpp should also search that directory. 
*/ +#ifndef USED_FOR_TARGET +/* #undef PREFIX_INCLUDE_DIR */ +#endif + + +/* The size of `dev_t', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_DEV_T 8 +#endif + + +/* The size of `ino_t', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_INO_T 8 +#endif + + +/* The size of `int', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_INT 4 +#endif + + +/* The size of `long', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_LONG 8 +#endif + + +/* The size of `long long', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_LONG_LONG 8 +#endif + + +/* The size of `short', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_SHORT 2 +#endif + + +/* The size of `void *', as computed by sizeof. */ +#ifndef USED_FOR_TARGET +#define SIZEOF_VOID_P 8 +#endif + + +/* Define to 1 if you have the ANSI C header files. */ +#ifndef USED_FOR_TARGET +#define STDC_HEADERS 1 +#endif + + +/* Define if you can safely include both and . */ +#ifndef USED_FOR_TARGET +#define STRING_WITH_STRINGS 1 +#endif + + +/* Define if TFmode long double should be the default */ +#ifndef USED_FOR_TARGET +/* #undef TARGET_DEFAULT_LONG_DOUBLE_128 */ +#endif + + +/* Define if your target C library provides the `dl_iterate_phdr' function. */ +#define TARGET_DL_ITERATE_PHDR 1 + +/* GNU C Library major version number used on the target, or 0. */ +#ifndef USED_FOR_TARGET +#define TARGET_GLIBC_MAJOR 0 +#endif + + +/* GNU C Library minor version number used on the target, or 0. 
*/ +#ifndef USED_FOR_TARGET +#define TARGET_GLIBC_MINOR 0 +#endif + + +/* Define if your target C Library properly handles PT_GNU_STACK */ +#ifndef USED_FOR_TARGET +/* #undef TARGET_LIBC_GNUSTACK */ +#endif + + +/* Define if your target C Library provides the AT_HWCAP value in the TCB */ +#ifndef USED_FOR_TARGET +/* #undef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */ +#endif + + +/* Define if your target C library provides stack protector support */ +#ifndef USED_FOR_TARGET +#define TARGET_LIBC_PROVIDES_SSP 1 +#endif + + +/* Define to 1 if you can safely include both and . */ +#ifndef USED_FOR_TARGET +#define TIME_WITH_SYS_TIME 1 +#endif + + +/* Define to the flag used to mark TLS sections if the default (`T') doesn't + work. */ +#ifndef USED_FOR_TARGET +/* #undef TLS_SECTION_ASM_FLAG */ +#endif + + +/* Define if your assembler mis-optimizes .eh_frame data. */ +#ifndef USED_FOR_TARGET +/* #undef USE_AS_TRADITIONAL_FORMAT */ +#endif + + +/* Define if you want to generate code by default that assumes that the Cygwin + DLL exports wrappers to support libstdc++ function replacement. */ +#ifndef USED_FOR_TARGET +/* #undef USE_CYGWIN_LIBSTDCXX_WRAPPERS */ +#endif + + +/* Define 0/1 if your linker supports hidden thunks in linkonce sections. */ +#ifndef USED_FOR_TARGET +/* #undef USE_HIDDEN_LINKONCE */ +#endif + + +/* Define to 1 if the 'long long' type is wider than 'long' but still + efficiently supported by the host hardware. */ +#ifndef USED_FOR_TARGET +/* #undef USE_LONG_LONG_FOR_WIDEST_FAST_INT */ +#endif + + +/* Define if we should use leading underscore on 64 bit mingw targets */ +#ifndef USED_FOR_TARGET +/* #undef USE_MINGW64_LEADING_UNDERSCORES */ +#endif + + +/* Enable extensions on AIX 3, Interix. */ +#ifndef _ALL_SOURCE +# define _ALL_SOURCE 1 +#endif +/* Enable GNU extensions on systems that have them. */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE 1 +#endif +/* Enable threading extensions on Solaris. 
*/ +#ifndef _POSIX_PTHREAD_SEMANTICS +# define _POSIX_PTHREAD_SEMANTICS 1 +#endif +/* Enable extensions on HP NonStop. */ +#ifndef _TANDEM_SOURCE +# define _TANDEM_SOURCE 1 +#endif +/* Enable general extensions on Solaris. */ +#ifndef __EXTENSIONS__ +# define __EXTENSIONS__ 1 +#endif + + +/* Define to be the last component of the Windows registry key under which to + look for installation paths. The full key used will be + HKEY_LOCAL_MACHINE/SOFTWARE/Free Software Foundation/{WIN32_REGISTRY_KEY}. + The default is the GCC version number. */ +#ifndef USED_FOR_TARGET +/* #undef WIN32_REGISTRY_KEY */ +#endif + + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Enable large inode numbers on Mac OS X 10.5. */ +#ifndef _DARWIN_USE_64_BIT_INODE +# define _DARWIN_USE_64_BIT_INODE 1 +#endif + +/* Number of bits in a file offset, on hosts where this is settable. */ +#ifndef USED_FOR_TARGET +/* #undef _FILE_OFFSET_BITS */ +#endif + + +/* Define for large files, on AIX-style hosts. */ +#ifndef USED_FOR_TARGET +/* #undef _LARGE_FILES */ +#endif + + +/* Define to 1 if on MINIX. */ +#ifndef USED_FOR_TARGET +/* #undef _MINIX */ +#endif + + +/* Define to 2 if the system does not provide POSIX.1 features except with + this defined. */ +#ifndef USED_FOR_TARGET +/* #undef _POSIX_1_SOURCE */ +#endif + + +/* Define to 1 if you need to in order for `stat' and other things to work. */ +#ifndef USED_FOR_TARGET +/* #undef _POSIX_SOURCE */ +#endif + + +/* Define for Solaris 2.5.1 so the uint32_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. 
*/ +#ifndef USED_FOR_TARGET +/* #undef _UINT32_T */ +#endif + + +/* Define for Solaris 2.5.1 so the uint64_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +#ifndef USED_FOR_TARGET +/* #undef _UINT64_T */ +#endif + + +/* Define for Solaris 2.5.1 so the uint8_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +#ifndef USED_FOR_TARGET +/* #undef _UINT8_T */ +#endif + + +/* Define to `char *' if does not define. */ +#ifndef USED_FOR_TARGET +/* #undef caddr_t */ +#endif + + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. */ +#ifndef __cplusplus +/* #undef inline */ +#endif + +/* Define to the type of a signed integer type of width exactly 16 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef int16_t */ +#endif + + +/* Define to the type of a signed integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef int32_t */ +#endif + + +/* Define to the type of a signed integer type of width exactly 64 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef int64_t */ +#endif + + +/* Define to the type of a signed integer type of width exactly 8 bits if such + a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef int8_t */ +#endif + + +/* Define to the widest signed integer type if and do + not define. */ +#ifndef USED_FOR_TARGET +/* #undef intmax_t */ +#endif + + +/* Define to the type of a signed integer type wide enough to hold a pointer, + if such a type exists, and if the system does not define it. */ +#ifndef USED_FOR_TARGET +/* #undef intptr_t */ +#endif + + +/* Define to `int' if does not define. 
*/ +#ifndef USED_FOR_TARGET +/* #undef pid_t */ +#endif + + +/* Define to `long' if doesn't define. */ +#ifndef USED_FOR_TARGET +/* #undef rlim_t */ +#endif + + +/* Define to `int' if does not define. */ +#ifndef USED_FOR_TARGET +/* #undef ssize_t */ +#endif + + +/* Define to the type of an unsigned integer type of width exactly 16 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef uint16_t */ +#endif + + +/* Define to the type of an unsigned integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef uint32_t */ +#endif + + +/* Define to the type of an unsigned integer type of width exactly 64 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef uint64_t */ +#endif + + +/* Define to the type of an unsigned integer type of width exactly 8 bits if + such a type exists and the standard includes do not define it. */ +#ifndef USED_FOR_TARGET +/* #undef uint8_t */ +#endif + + +/* Define to the widest unsigned integer type if and + do not define. */ +#ifndef USED_FOR_TARGET +/* #undef uintmax_t */ +#endif + + +/* Define to the type of an unsigned integer type wide enough to hold a + pointer, if such a type exists, and if the system does not define it. */ +#ifndef USED_FOR_TARGET +/* #undef uintptr_t */ +#endif + + +/* Define as `fork' if `vfork' does not work. */ +#ifndef USED_FOR_TARGET +/* #undef vfork */ +#endif + diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/auto-profile.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/auto-profile.h new file mode 100644 index 0000000..bf3f90f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/auto-profile.h @@ -0,0 +1,31 @@ +/* auto-profile.h - Defines data exported from auto-profile.cc + Copyright (C) 2014-2022 Free Software Foundation, Inc. 
+ Contributed by Dehao Chen (dehao@google.com) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef AUTO_PROFILE_H +#define AUTO_PROFILE_H + +/* Read, process, finalize AutoFDO data structures. */ +extern void read_autofdo_file (void); +extern void end_auto_profile (void); + +/* Returns TRUE if EDGE is hot enough to be inlined early. */ +extern bool afdo_callsite_hot_enough_for_early_inline (struct cgraph_edge *); + +#endif /* AUTO_PROFILE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/b-header-vars b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/b-header-vars new file mode 100644 index 0000000..147d828 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/b-header-vars @@ -0,0 +1,95 @@ +USER_H=float.h iso646.h stdarg.h stdbool.h stddef.h varargs.h stdfix.h stdnoreturn.h stdalign.h stdatomic.h config/i386/cpuid.h mmintrin.h mm3dnow.h xmmintrin.h emmintrin.h pmmintrin.h tmmintrin.h ammintrin.h smmintrin.h nmmintrin.h bmmintrin.h fma4intrin.h wmmintrin.h immintrin.h x86intrin.h avxintrin.h xopintrin.h ia32intrin.h cross-stdarg.h lwpintrin.h popcntintrin.h lzcntintrin.h bmiintrin.h bmi2intrin.h tbmintrin.h avx2intrin.h avx512fintrin.h fmaintrin.h f16cintrin.h rtmintrin.h xtestintrin.h rdseedintrin.h prfchwintrin.h adxintrin.h fxsrintrin.h xsaveintrin.h xsaveoptintrin.h avx512cdintrin.h avx512erintrin.h avx512pfintrin.h shaintrin.h clflushoptintrin.h xsavecintrin.h xsavesintrin.h 
avx512dqintrin.h avx512bwintrin.h avx512vlintrin.h avx512vlbwintrin.h avx512vldqintrin.h avx512ifmaintrin.h avx512ifmavlintrin.h avx512vbmiintrin.h avx512vbmivlintrin.h avx5124fmapsintrin.h avx5124vnniwintrin.h avx512vpopcntdqintrin.h clwbintrin.h mwaitxintrin.h clzerointrin.h pkuintrin.h sgxintrin.h cetintrin.h gfniintrin.h cet.h avx512vbmi2intrin.h avx512vbmi2vlintrin.h avx512vnniintrin.h avx512vnnivlintrin.h vaesintrin.h vpclmulqdqintrin.h avx512vpopcntdqvlintrin.h avx512bitalgintrin.h pconfigintrin.h wbnoinvdintrin.h movdirintrin.h waitpkgintrin.h cldemoteintrin.h avx512bf16vlintrin.h avx512bf16intrin.h enqcmdintrin.h serializeintrin.h avx512vp2intersectintrin.h avx512vp2intersectvlintrin.h tsxldtrkintrin.h amxtileintrin.h amxint8intrin.h amxbf16intrin.h x86gprintrin.h uintrintrin.h hresetintrin.h keylockerintrin.h avxvnniintrin.h mwaitintrin.h avx512fp16intrin.h avx512fp16vlintrin.h mm_malloc.h +T_GLIMITS_H=glimits.h +T_STDINT_GCC_H=stdint-gcc.h +HASHTAB_H=hashtab.h +OBSTACK_H=obstack.h +SPLAY_TREE_H=splay-tree.h +MD5_H=md5.h +XREGEX_H=xregex.h +FNMATCH_H=fnmatch.h +LINKER_PLUGIN_API_H=plugin-api.h +BCONFIG_H=bconfig.h auto-host.h ansidecl.h +CONFIG_H=config.h auto-host.h ansidecl.h +TCONFIG_H=tconfig.h auto-host.h ansidecl.h +TM_P_H=tm_p.h config/i386/i386-protos.h linux-protos.h tm-preds.h +TM_D_H=tm_d.h config/i386/i386.h i386-protos.h defaults.h +GTM_H=tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h +TM_H=tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h 
i386-isa.def +DUMPFILE_H=line-map.h dumpfile.h +VEC_H=vec.h statistics.h ggc.h gtype-desc.h statistics.h +HASH_TABLE_H=hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h +EXCEPT_H=except.h hashtab.h +TARGET_H=tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def target.h target.def target-hooks-macros.h target-insns.def insn-modes.h insn-codes.h +C_TARGET_H=c-family/c-target.h c-family/c-target.def target-hooks-macros.h +COMMON_TARGET_H=common/common-target.h line-map.h input.h common/common-target.def target-hooks-macros.h +D_TARGET_H=d/d-target.h d/d-target.def target-hooks-macros.h +MACHMODE_H=machmode.h mode-classes.def +HOOKS_H=hooks.h +HOSTHOOKS_DEF_H=hosthooks-def.h hooks.h +LANGHOOKS_DEF_H=langhooks-def.h hooks.h +TARGET_DEF_H=target-def.h target-hooks-def.h hooks.h targhooks.h +C_TARGET_DEF_H=c-family/c-target-def.h c-family/c-target-hooks-def.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def 
cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def hooks.h common/common-targhooks.h +CORETYPES_H=coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h +RTL_BASE_H=coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h rtl.h rtl.def reg-notes.def insn-notes.def line-map.h input.h real.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h fixed-value.h alias.h hashtab.h +FIXED_VALUE_H=fixed-value.h +RTL_H=coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h rtl.h rtl.def reg-notes.def insn-notes.def line-map.h input.h real.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h fixed-value.h alias.h hashtab.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def genrtl.h +READ_MD_H=obstack.h hashtab.h read-md.h +INTERNAL_FN_H=internal-fn.h internal-fn.def +TREE_CORE_H=tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h 
fixed-value.h +TREE_H=tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h +REGSET_H=regset.h bitmap.h hashtab.h statistics.h hard-reg-set.h +BASIC_BLOCK_H=basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h +GIMPLE_H=gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h 
tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h +GCOV_IO_H=gcov-io.h version.h auto-host.h gcov-counter.def +RECOG_H=recog.h +EMIT_RTL_H=emit-rtl.h +FLAGS_H=flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def +OPTIONS_H=options.h flag-types.h config/i386/i386-opts.h stringop.def +FUNCTION_H=function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h +EXPR_H=expr.h insn-config.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h 
insn-modes-inline.h machmode.h mode-classes.def double-int.h rtl.h rtl.def reg-notes.def insn-notes.def line-map.h input.h real.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h fixed-value.h alias.h hashtab.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def genrtl.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h emit-rtl.h +OPTABS_H=optabs.h insn-codes.h insn-opinit.h +REGS_H=regs.h hard-reg-set.h +CFGLOOP_H=cfgloop.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h bitmap.h hashtab.h statistics.h sbitmap.h +IPA_UTILS_H=ipa-utils.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def 
gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h 
+IPA_REFERENCE_H=ipa-reference.h bitmap.h hashtab.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h +CGRAPH_H=cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h 
gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h +DF_H=df.h bitmap.h hashtab.h statistics.h regset.h bitmap.h hashtab.h statistics.h hard-reg-set.h sbitmap.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h alloc-pool.h timevar.h timevar.def +RESOURCE_H=resource.h hard-reg-set.h df.h bitmap.h hashtab.h statistics.h regset.h bitmap.h hashtab.h statistics.h hard-reg-set.h sbitmap.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h alloc-pool.h timevar.h timevar.def +GCC_H=gcc.h version.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def +GGC_H=ggc.h gtype-desc.h 
statistics.h +TIMEVAR_H=timevar.h timevar.def +INSN_ATTR_H=insn-attr.h insn-attr-common.h insn-addr.h +INSN_ADDR_H=insn-addr.h +C_COMMON_H=c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def +C_PRAGMA_H=c-family/c-pragma.h line-map.h cpplib.h +C_TREE_H=c/c-tree.h c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def diagnostic.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def pretty-print.h line-map.h input.h obstack.h wide-int-print.h +SYSTEM_H=system.h hwint.h libiberty.h safe-ctype.h filenames.h +PREDICT_H=predict.h predict.def +CPPLIB_H=line-map.h cpplib.h +CODYLIB_H=cody.hh +INPUT_H=line-map.h input.h 
+OPTS_H=line-map.h input.h vec.h statistics.h ggc.h gtype-desc.h statistics.h opts.h obstack.h +SYMTAB_H=symtab.h obstack.h +CPP_INTERNAL_H=internal.h +TREE_DUMP_H=tree-dump.h splay-tree.h line-map.h dumpfile.h +TREE_PASS_H=tree-pass.h timevar.h timevar.def line-map.h dumpfile.h +TREE_SSA_H=tree-ssa.h tree-ssa-operands.h bitmap.h hashtab.h statistics.h sbitmap.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h 
statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h hashtab.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h 
cif-code.def ipa-ref.h plugin-api.h is-a.h ipa-reference.h bitmap.h hashtab.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h tree-ssa-alias.h +PRETTY_PRINT_H=pretty-print.h line-map.h input.h obstack.h wide-int-print.h +TREE_PRETTY_PRINT_H=tree-pretty-print.h pretty-print.h line-map.h input.h obstack.h wide-int-print.h +GIMPLE_PRETTY_PRINT_H=gimple-pretty-print.h tree-pretty-print.h pretty-print.h line-map.h input.h obstack.h wide-int-print.h +DIAGNOSTIC_CORE_H=diagnostic-core.h line-map.h input.h bversion.h diagnostic.def +DIAGNOSTIC_H=diagnostic.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def pretty-print.h line-map.h input.h obstack.h wide-int-print.h +C_PRETTY_PRINT_H=c-family/c-pretty-print.h pretty-print.h line-map.h input.h obstack.h wide-int-print.h c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h 
line-map.h input.h bversion.h diagnostic.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h +TREE_INLINE_H=tree-inline.h +REAL_H=real.h +LTO_STREAMER_H=lto-streamer.h plugin-api.h tm.h options.h config/vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def target.h target.def target-hooks-macros.h target-insns.def insn-modes.h insn-codes.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h 
gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h vec.h statistics.h ggc.h gtype-desc.h statistics.h hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h 
stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h gcov-io.h version.h auto-host.h gcov-counter.def diagnostic.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def pretty-print.h line-map.h input.h obstack.h wide-int-print.h alloc-pool.h +IPA_PROP_H=ipa-prop.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/i386/i386-opts.h stringop.def real.h fixed-value.h tree-check.h vec.h statistics.h ggc.h gtype-desc.h statistics.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def 
builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def common/config/i386/i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h biarch64.h i386.h unix.h att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h x86-64.h gnu-user-common.h gnu-user64.h linux.h linux-android.h linux-common.h linux64.h initfini-array.h defaults.h insn-constants.h insn-flags.h options.h flag-types.h i386-opts.h stringop.def x86-tune.def i386-cpuinfo.h i386-isa.def hard-reg-set.h vec.h statistics.h 
ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h i386-opts.h stringop.def real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h alloc-pool.h +BITMAP_H=bitmap.h hashtab.h statistics.h +GCC_PLUGIN_H=gcc-plugin.h highlev-plugin-common.h plugin.def config.h auto-host.h ansidecl.h system.h hwint.h libiberty.h safe-ctype.h filenames.h hashtab.h +PLUGIN_H=plugin.h gcc-plugin.h highlev-plugin-common.h plugin.def config.h auto-host.h ansidecl.h system.h hwint.h libiberty.h safe-ctype.h filenames.h hashtab.h +PLUGIN_VERSION_H=plugin-version.h configargs.h +CONTEXT_H=context.h +GENSUPPORT_H=gensupport.h read-md.h optabs.def +RTL_SSA_H=pretty-print.h line-map.h input.h obstack.h wide-int-print.h insn-config.h splay-tree-utils.h recog.h regs.h hard-reg-set.h function-abi.h obstack-utils.h mux-utils.h rtlanal.h memmodel.h emit-rtl.h rtl-ssa/accesses.h rtl-ssa/insns.h rtl-ssa/blocks.h rtl-ssa/changes.h rtl-ssa/functions.h rtl-ssa/is-a.inl rtl-ssa/access-utils.h rtl-ssa/insn-utils.h rtl-ssa/movement.h rtl-ssa/change-utils.h rtl-ssa/member-fns.inl +GTFILES_H=gt-coverage.h gt-symtab-thunks.h gt-caller-save.h gt-symtab.h gt-alias.h gt-bitmap.h gt-cselib.h gt-cgraph.h gt-ipa-prop.h gt-ipa-cp.h gt-ipa-sra.h gt-dbxout.h gt-ipa-modref.h gt-diagnostic-spec.h gt-dwarf2asm.h gt-dwarf2cfi.h gt-dwarf2ctf.h gt-dwarf2out.h gt-ctfout.h gt-btfout.h 
gt-tree-vect-generic.h gt-gimple-isel.h gt-dojump.h gt-emit-rtl.h gt-explow.h gt-expr.h gt-function.h gt-except.h gt-ggc-tests.h gt-gcse.h gt-godump.h gt-lists.h gt-optabs-libfuncs.h gt-profile.h gt-mcf.h gt-reg-stack.h gt-cfgrtl.h gt-stor-layout.h gt-stringpool.h gt-tree.h gt-varasm.h gt-tree-ssanames.h gt-tree-eh.h gt-tree-ssa-address.h gt-tree-cfg.h gt-tree-ssa-loop-ivopts.h gt-tree-dfa.h gt-tree-iterator.h gt-gimple-expr.h gt-tree-scalar-evolution.h gt-tree-profile.h gt-tree-nested.h gt-omp-general.h gt-omp-low.h gt-targhooks.h gt-i386.h gt-passes.h gt-cgraphclones.h gt-tree-phinodes.h gt-trans-mem.h gt-vtable-verify.h gt-asan.h gt-ubsan.h gt-tsan.h gt-sanopt.h gt-sancov.h gt-ipa-devirt.h gt-calls.h gt-i386-builtins.h gt-i386-expand.h gt-i386-options.h gt-ada-decl.h gt-ada-trans.h gt-ada-utils.h gt-ada-misc.h gt-c-c-lang.h gt-c-c-decl.h gt-c-family-c-common.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-c-family-c-format.h gt-c-c-objc-common.h gt-c-c-parser.h gt-c-family-c-common.h gt-c-family-c-format.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-cp-call.h gt-cp-class.h gt-cp-constexpr.h gt-cp-constraint.h gt-cp-coroutines.h gt-cp-cp-gimplify.h gt-cp-cp-lang.h gt-cp-cp-objcp-common.h gt-cp-decl.h gt-cp-decl2.h gt-cp-except.h gt-cp-friend.h gt-cp-init.h gt-cp-lambda.h gt-cp-lex.h gt-cp-logic.h gt-cp-mangle.h gt-cp-method.h gt-cp-module.h gt-cp-name-lookup.h gt-cp-parser.h gt-cp-pt.h gt-cp-rtti.h gt-cp-semantics.h gt-cp-tree.h gt-cp-typeck2.h gt-cp-vtable-class-hierarchy.h gt-d-d-builtins.h gt-d-d-lang.h gt-d-typeinfo.h gt-fortran-f95-lang.h gt-fortran-trans-decl.h gt-fortran-trans-intrinsic.h gt-fortran-trans-io.h gt-fortran-trans-stmt.h gt-fortran-trans-types.h gt-go-go-lang.h gt-jit-dummy-frontend.h gt-lto-lto-lang.h gt-lto-lto.h gt-lto-lto-common.h gt-lto-lto-dump.h gt-objc-objc-act.h gt-objc-objc-runtime-shared-support.h gt-objc-objc-gnu-runtime-abi-01.h gt-objc-objc-next-runtime-abi-01.h gt-objc-objc-next-runtime-abi-02.h gt-c-c-parser.h 
gt-c-c-decl.h gt-c-c-objc-common.h gt-c-family-c-common.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-c-family-c-format.h gt-c-family-c-common.h gt-c-family-c-format.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-cp-call.h gt-cp-class.h gt-cp-constexpr.h gt-cp-constraint.h gt-cp-coroutines.h gt-cp-cp-gimplify.h gt-objcp-objcp-lang.h gt-cp-cp-objcp-common.h gt-cp-decl.h gt-cp-decl2.h gt-cp-except.h gt-cp-friend.h gt-cp-init.h gt-cp-lambda.h gt-cp-lex.h gt-cp-logic.h gt-cp-mangle.h gt-cp-method.h gt-cp-module.h gt-cp-name-lookup.h gt-cp-parser.h gt-cp-pt.h gt-cp-rtti.h gt-cp-semantics.h gt-cp-tree.h gt-cp-typeck2.h gt-cp-vtable-class-hierarchy.h gt-objc-objc-act.h gt-objc-objc-gnu-runtime-abi-01.h gt-objc-objc-next-runtime-abi-01.h gt-objc-objc-next-runtime-abi-02.h gt-objc-objc-runtime-shared-support.h +GTFILES_LANG_H=gtype-ada.h gtype-c.h gtype-cp.h gtype-d.h gtype-fortran.h gtype-go.h gtype-jit.h gtype-lto.h gtype-objc.h gtype-objcp.h diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/backend.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/backend.h new file mode 100644 index 0000000..7ff5405 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/backend.h @@ -0,0 +1,35 @@ +/* Common Backend requirements. + + Copyright (C) 2015-2022 Free Software Foundation, Inc. + Contributed by Andrew MacLeod + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_BACKEND_H +#define GCC_BACKEND_H + +/* This is an aggregation header file. This means it should contain only + other include files. */ + +#include "tm.h" +#include "function.h" +#include "bitmap.h" +#include "sbitmap.h" +#include "basic-block.h" +#include "cfg.h" + +#endif /*GCC_BACKEND_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/basic-block.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/basic-block.h new file mode 100644 index 0000000..e3fff1f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/basic-block.h @@ -0,0 +1,650 @@ +/* Define control flow data structures for the CFG. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_BASIC_BLOCK_H +#define GCC_BASIC_BLOCK_H + +#include + +/* Control flow edge information. */ +class GTY((user)) edge_def { +public: + /* The two blocks at the ends of the edge. */ + basic_block src; + basic_block dest; + + /* Instructions queued on the edge. */ + union edge_def_insns { + gimple_seq g; + rtx_insn *r; + } insns; + + /* Auxiliary info specific to a pass. */ + PTR aux; + + /* Location of any goto implicit in the edge. */ + location_t goto_locus; + + /* The index number corresponding to this edge in the edge vector + dest->preds. 
*/ + unsigned int dest_idx; + + int flags; /* see cfg-flags.def */ + profile_probability probability; + + /* Return count of edge E. */ + inline profile_count count () const; +}; + +/* Masks for edge.flags. */ +#define DEF_EDGE_FLAG(NAME,IDX) EDGE_##NAME = 1 << IDX , +enum cfg_edge_flags { +#include "cfg-flags.def" + LAST_CFG_EDGE_FLAG /* this is only used for EDGE_ALL_FLAGS */ +}; +#undef DEF_EDGE_FLAG + +/* Bit mask for all edge flags. */ +#define EDGE_ALL_FLAGS ((LAST_CFG_EDGE_FLAG - 1) * 2 - 1) + +/* The following four flags all indicate something special about an edge. + Test the edge flags on EDGE_COMPLEX to detect all forms of "strange" + control flow transfers. */ +#define EDGE_COMPLEX \ + (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE) + +struct GTY(()) rtl_bb_info { + /* The first insn of the block is embedded into bb->il.x. */ + /* The last insn of the block. */ + rtx_insn *end_; + + /* In CFGlayout mode points to insn notes/jumptables to be placed just before + and after the block. */ + rtx_insn *header_; + rtx_insn *footer_; +}; + +struct GTY(()) gimple_bb_info { + /* Sequence of statements in this block. */ + gimple_seq seq; + + /* PHI nodes for this block. */ + gimple_seq phi_nodes; +}; + +/* A basic block is a sequence of instructions with only one entry and + only one exit. If any one of the instructions are executed, they + will all be executed, and in sequence from first to last. + + There may be COND_EXEC instructions in the basic block. The + COND_EXEC *instructions* will be executed -- but if the condition + is false the conditionally executed *expressions* will of course + not be executed. We don't consider the conditionally executed + expression (which might have side-effects) to be in a separate + basic block because the program counter will always be at the same + location after the COND_EXEC instruction, regardless of whether the + condition is true or not. 
+ + Basic blocks need not start with a label nor end with a jump insn. + For example, a previous basic block may just "conditionally fall" + into the succeeding basic block, and the last basic block need not + end with a jump insn. Block 0 is a descendant of the entry block. + + A basic block beginning with two labels cannot have notes between + the labels. + + Data for jump tables are stored in jump_insns that occur in no + basic block even though these insns can follow or precede insns in + basic blocks. */ + +/* Basic block information indexed by block number. */ +struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_def { + /* The edges into and out of the block. */ + vec *preds; + vec *succs; + + /* Auxiliary info specific to a pass. */ + PTR GTY ((skip (""))) aux; + + /* Innermost loop containing the block. */ + class loop *loop_father; + + /* The dominance and postdominance information node. */ + struct et_node * GTY ((skip (""))) dom[2]; + + /* Previous and next blocks in the chain. */ + basic_block prev_bb; + basic_block next_bb; + + union basic_block_il_dependent { + struct gimple_bb_info GTY ((tag ("0"))) gimple; + struct { + rtx_insn *head_; + struct rtl_bb_info * rtl; + } GTY ((tag ("1"))) x; + } GTY ((desc ("((%1.flags & BB_RTL) != 0)"))) il; + + /* Various flags. See cfg-flags.def. */ + int flags; + + /* The index of this block. */ + int index; + + /* Expected number of executions: calculated in profile.cc. */ + profile_count count; + + /* The discriminator for this block. The discriminator distinguishes + among several basic blocks that share a common locus, allowing for + more accurate sample-based profiling. */ + int discriminator; +}; + +/* This ensures that struct gimple_bb_info is smaller than + struct rtl_bb_info, so that inlining the former into basic_block_def + is the better choice. 
*/ +typedef int __assert_gimple_bb_smaller_rtl_bb + [(int) sizeof (struct rtl_bb_info) + - (int) sizeof (struct gimple_bb_info)]; + + +#define BB_FREQ_MAX 10000 + +/* Masks for basic_block.flags. */ +#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) BB_##NAME = 1 << IDX , +enum cfg_bb_flags +{ +#include "cfg-flags.def" + LAST_CFG_BB_FLAG /* this is only used for BB_ALL_FLAGS */ +}; +#undef DEF_BASIC_BLOCK_FLAG + +/* Bit mask for all basic block flags. */ +#define BB_ALL_FLAGS ((LAST_CFG_BB_FLAG - 1) * 2 - 1) + +/* Bit mask for all basic block flags that must be preserved. These are + the bit masks that are *not* cleared by clear_bb_flags. */ +#define BB_FLAGS_TO_PRESERVE \ + (BB_DISABLE_SCHEDULE | BB_RTL | BB_NON_LOCAL_GOTO_TARGET \ + | BB_HOT_PARTITION | BB_COLD_PARTITION) + +/* Dummy bitmask for convenience in the hot/cold partitioning code. */ +#define BB_UNPARTITIONED 0 + +/* Partitions, to be used when partitioning hot and cold basic blocks into + separate sections. */ +#define BB_PARTITION(bb) ((bb)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)) +#define BB_SET_PARTITION(bb, part) do { \ + basic_block bb_ = (bb); \ + bb_->flags = ((bb_->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) \ + | (part)); \ +} while (0) + +#define BB_COPY_PARTITION(dstbb, srcbb) \ + BB_SET_PARTITION (dstbb, BB_PARTITION (srcbb)) + +/* Defines for accessing the fields of the CFG structure for function FN. 
*/ +#define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr) +#define EXIT_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_exit_block_ptr) +#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info) +#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks) +#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges) +#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block) +#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map) +#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status) + +#define BASIC_BLOCK_FOR_FN(FN,N) \ + ((*basic_block_info_for_fn (FN))[(N)]) +#define SET_BASIC_BLOCK_FOR_FN(FN,N,BB) \ + ((*basic_block_info_for_fn (FN))[(N)] = (BB)) + +/* For iterating over basic blocks. */ +#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \ + for (BB = FROM; BB != TO; BB = BB->DIR) + +#define FOR_EACH_BB_FN(BB, FN) \ + FOR_BB_BETWEEN (BB, (FN)->cfg->x_entry_block_ptr->next_bb, (FN)->cfg->x_exit_block_ptr, next_bb) + +#define FOR_EACH_BB_REVERSE_FN(BB, FN) \ + FOR_BB_BETWEEN (BB, (FN)->cfg->x_exit_block_ptr->prev_bb, (FN)->cfg->x_entry_block_ptr, prev_bb) + +/* For iterating over insns in basic block. */ +#define FOR_BB_INSNS(BB, INSN) \ + for ((INSN) = BB_HEAD (BB); \ + (INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \ + (INSN) = NEXT_INSN (INSN)) + +/* For iterating over insns in basic block when we might remove the + current insn. */ +#define FOR_BB_INSNS_SAFE(BB, INSN, CURR) \ + for ((INSN) = BB_HEAD (BB), (CURR) = (INSN) ? NEXT_INSN ((INSN)): NULL; \ + (INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \ + (INSN) = (CURR), (CURR) = (INSN) ? NEXT_INSN ((INSN)) : NULL) + +#define FOR_BB_INSNS_REVERSE(BB, INSN) \ + for ((INSN) = BB_END (BB); \ + (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \ + (INSN) = PREV_INSN (INSN)) + +#define FOR_BB_INSNS_REVERSE_SAFE(BB, INSN, CURR) \ + for ((INSN) = BB_END (BB),(CURR) = (INSN) ? PREV_INSN ((INSN)) : NULL; \ + (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \ + (INSN) = (CURR), (CURR) = (INSN) ? 
PREV_INSN ((INSN)) : NULL) + +/* Cycles through _all_ basic blocks, even the fake ones (entry and + exit block). */ + +#define FOR_ALL_BB_FN(BB, FN) \ + for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb) + + +/* Stuff for recording basic block info. */ + +/* For now, these will be functions (so that they can include checked casts + to rtx_insn. Once the underlying fields are converted from rtx + to rtx_insn, these can be converted back to macros. */ + +#define BB_HEAD(B) (B)->il.x.head_ +#define BB_END(B) (B)->il.x.rtl->end_ +#define BB_HEADER(B) (B)->il.x.rtl->header_ +#define BB_FOOTER(B) (B)->il.x.rtl->footer_ + +/* Special block numbers [markers] for entry and exit. + Neither of them is supposed to hold actual statements. */ +#define ENTRY_BLOCK (0) +#define EXIT_BLOCK (1) + +/* The two blocks that are always in the cfg. */ +#define NUM_FIXED_BLOCKS (2) + +/* This is the value which indicates no edge is present. */ +#define EDGE_INDEX_NO_EDGE -1 + +/* EDGE_INDEX returns an integer index for an edge, or EDGE_INDEX_NO_EDGE + if there is no edge between the 2 basic blocks. */ +#define EDGE_INDEX(el, pred, succ) (find_edge_index ((el), (pred), (succ))) + +/* INDEX_EDGE_PRED_BB and INDEX_EDGE_SUCC_BB return a pointer to the basic + block which is either the pred or succ end of the indexed edge. */ +#define INDEX_EDGE_PRED_BB(el, index) ((el)->index_to_edge[(index)]->src) +#define INDEX_EDGE_SUCC_BB(el, index) ((el)->index_to_edge[(index)]->dest) + +/* INDEX_EDGE returns a pointer to the edge. */ +#define INDEX_EDGE(el, index) ((el)->index_to_edge[(index)]) + +/* Number of edges in the compressed edge list. */ +#define NUM_EDGES(el) ((el)->num_edges) + +/* BB is assumed to contain conditional jump. Return the fallthru edge. */ +#define FALLTHRU_EDGE(bb) (EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \ + ? EDGE_SUCC ((bb), 0) : EDGE_SUCC ((bb), 1)) + +/* BB is assumed to contain conditional jump. Return the branch edge. 
*/ +#define BRANCH_EDGE(bb) (EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \ + ? EDGE_SUCC ((bb), 1) : EDGE_SUCC ((bb), 0)) + +/* Return expected execution frequency of the edge E. */ +#define EDGE_FREQUENCY(e) e->count ().to_frequency (cfun) + +/* Compute a scale factor (or probability) suitable for scaling of + gcov_type values via apply_probability() and apply_scale(). */ +#define GCOV_COMPUTE_SCALE(num,den) \ + ((den) ? RDIV ((num) * REG_BR_PROB_BASE, (den)) : REG_BR_PROB_BASE) + +/* Return nonzero if edge is critical. */ +#define EDGE_CRITICAL_P(e) (EDGE_COUNT ((e)->src->succs) >= 2 \ + && EDGE_COUNT ((e)->dest->preds) >= 2) + +#define EDGE_COUNT(ev) vec_safe_length (ev) +#define EDGE_I(ev,i) (*ev)[(i)] +#define EDGE_PRED(bb,i) (*(bb)->preds)[(i)] +#define EDGE_SUCC(bb,i) (*(bb)->succs)[(i)] + +/* Returns true if BB has precisely one successor. */ + +static inline bool +single_succ_p (const_basic_block bb) +{ + return EDGE_COUNT (bb->succs) == 1; +} + +/* Returns true if BB has precisely one predecessor. */ + +static inline bool +single_pred_p (const_basic_block bb) +{ + return EDGE_COUNT (bb->preds) == 1; +} + +/* Returns the single successor edge of basic block BB. Aborts if + BB does not have exactly one successor. */ + +static inline edge +single_succ_edge (const_basic_block bb) +{ + gcc_checking_assert (single_succ_p (bb)); + return EDGE_SUCC (bb, 0); +} + +/* Returns the single predecessor edge of basic block BB. Aborts + if BB does not have exactly one predecessor. */ + +static inline edge +single_pred_edge (const_basic_block bb) +{ + gcc_checking_assert (single_pred_p (bb)); + return EDGE_PRED (bb, 0); +} + +/* Returns the single successor block of basic block BB. Aborts + if BB does not have exactly one successor. */ + +static inline basic_block +single_succ (const_basic_block bb) +{ + return single_succ_edge (bb)->dest; +} + +/* Returns the single predecessor block of basic block BB. 
Aborts + if BB does not have exactly one predecessor.*/ + +static inline basic_block +single_pred (const_basic_block bb) +{ + return single_pred_edge (bb)->src; +} + +/* Iterator object for edges. */ + +struct edge_iterator { + unsigned index; + vec **container; +}; + +static inline vec * +ei_container (edge_iterator i) +{ + gcc_checking_assert (i.container); + return *i.container; +} + +#define ei_start(iter) ei_start_1 (&(iter)) +#define ei_last(iter) ei_last_1 (&(iter)) + +/* Return an iterator pointing to the start of an edge vector. */ +static inline edge_iterator +ei_start_1 (vec **ev) +{ + edge_iterator i; + + i.index = 0; + i.container = ev; + + return i; +} + +/* Return an iterator pointing to the last element of an edge + vector. */ +static inline edge_iterator +ei_last_1 (vec **ev) +{ + edge_iterator i; + + i.index = EDGE_COUNT (*ev) - 1; + i.container = ev; + + return i; +} + +/* Is the iterator `i' at the end of the sequence? */ +static inline bool +ei_end_p (edge_iterator i) +{ + return (i.index == EDGE_COUNT (ei_container (i))); +} + +/* Is the iterator `i' at one position before the end of the + sequence? */ +static inline bool +ei_one_before_end_p (edge_iterator i) +{ + return (i.index + 1 == EDGE_COUNT (ei_container (i))); +} + +/* Advance the iterator to the next element. */ +static inline void +ei_next (edge_iterator *i) +{ + gcc_checking_assert (i->index < EDGE_COUNT (ei_container (*i))); + i->index++; +} + +/* Move the iterator to the previous element. */ +static inline void +ei_prev (edge_iterator *i) +{ + gcc_checking_assert (i->index > 0); + i->index--; +} + +/* Return the edge pointed to by the iterator `i'. */ +static inline edge +ei_edge (edge_iterator i) +{ + return EDGE_I (ei_container (i), i.index); +} + +/* Return an edge pointed to by the iterator. Do it safely so that + NULL is returned when the iterator is pointing at the end of the + sequence. */ +static inline edge +ei_safe_edge (edge_iterator i) +{ + return !ei_end_p (i) ? 
ei_edge (i) : NULL; +} + +/* Return 1 if we should continue to iterate. Return 0 otherwise. + *Edge P is set to the next edge if we are to continue to iterate + and NULL otherwise. */ + +static inline bool +ei_cond (edge_iterator ei, edge *p) +{ + if (!ei_end_p (ei)) + { + *p = ei_edge (ei); + return 1; + } + else + { + *p = NULL; + return 0; + } +} + +/* This macro serves as a convenient way to iterate each edge in a + vector of predecessor or successor edges. It must not be used when + an element might be removed during the traversal, otherwise + elements will be missed. Instead, use a for-loop like that shown + in the following pseudo-code: + + FOR (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) + { + IF (e != taken_edge) + remove_edge (e); + ELSE + ei_next (&ei); + } +*/ + +#define FOR_EACH_EDGE(EDGE,ITER,EDGE_VEC) \ + for ((ITER) = ei_start ((EDGE_VEC)); \ + ei_cond ((ITER), &(EDGE)); \ + ei_next (&(ITER))) + +#define CLEANUP_EXPENSIVE 1 /* Do relatively expensive optimizations + except for edge forwarding */ +#define CLEANUP_CROSSJUMP 2 /* Do crossjumping. */ +#define CLEANUP_POST_REGSTACK 4 /* We run after reg-stack and need + to care REG_DEAD notes. */ +#define CLEANUP_THREADING 8 /* Do jump threading. */ +#define CLEANUP_NO_INSN_DEL 16 /* Do not try to delete trivially dead + insns. */ +#define CLEANUP_CFGLAYOUT 32 /* Do cleanup in cfglayout mode. */ +#define CLEANUP_CFG_CHANGED 64 /* The caller changed the CFG. */ +#define CLEANUP_NO_PARTITIONING 128 /* Do not try to fix partitions. */ +#define CLEANUP_FORCE_FAST_DCE 0x100 /* Force run_fast_dce to be called + at least once. */ + +/* Return true if BB is in a transaction. */ + +static inline bool +bb_in_transaction (basic_block bb) +{ + return bb->flags & BB_IN_TRANSACTION; +} + +/* Return true when one of the predecessor edges of BB is marked with EDGE_EH. 
*/ +static inline bool +bb_has_eh_pred (basic_block bb) +{ + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) + { + if (e->flags & EDGE_EH) + return true; + } + return false; +} + +/* Return true when one of the predecessor edges of BB is marked with EDGE_ABNORMAL. */ +static inline bool +bb_has_abnormal_pred (basic_block bb) +{ + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) + { + if (e->flags & EDGE_ABNORMAL) + return true; + } + return false; +} + +/* Return the fallthru edge in EDGES if it exists, NULL otherwise. */ +static inline edge +find_fallthru_edge (vec *edges) +{ + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, edges) + if (e->flags & EDGE_FALLTHRU) + break; + + return e; +} + +/* Check tha probability is sane. */ + +static inline void +check_probability (int prob) +{ + gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE); +} + +/* Given PROB1 and PROB2, return PROB1*PROB2/REG_BR_PROB_BASE. + Used to combine BB probabilities. */ + +static inline int +combine_probabilities (int prob1, int prob2) +{ + check_probability (prob1); + check_probability (prob2); + return RDIV (prob1 * prob2, REG_BR_PROB_BASE); +} + +/* Apply scale factor SCALE on frequency or count FREQ. Use this + interface when potentially scaling up, so that SCALE is not + constrained to be < REG_BR_PROB_BASE. */ + +static inline gcov_type +apply_scale (gcov_type freq, gcov_type scale) +{ + return RDIV (freq * scale, REG_BR_PROB_BASE); +} + +/* Apply probability PROB on frequency or count FREQ. */ + +static inline gcov_type +apply_probability (gcov_type freq, int prob) +{ + check_probability (prob); + return apply_scale (freq, prob); +} + +/* Return inverse probability for PROB. */ + +static inline int +inverse_probability (int prob1) +{ + check_probability (prob1); + return REG_BR_PROB_BASE - prob1; +} + +/* Return true if BB has at least one abnormal outgoing edge. 
*/ + +static inline bool +has_abnormal_or_eh_outgoing_edge_p (basic_block bb) +{ + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->succs) + if (e->flags & (EDGE_ABNORMAL | EDGE_EH)) + return true; + + return false; +} + +/* Return true when one of the predecessor edges of BB is marked with + EDGE_ABNORMAL_CALL or EDGE_EH. */ + +static inline bool +has_abnormal_call_or_eh_pred_edge_p (basic_block bb) +{ + edge e; + edge_iterator ei; + + FOR_EACH_EDGE (e, ei, bb->preds) + if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) + return true; + + return false; +} + +/* Return count of edge E. */ +inline profile_count edge_def::count () const +{ + return src->count.apply_probability (probability); +} + +#endif /* GCC_BASIC_BLOCK_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bb-reorder.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bb-reorder.h new file mode 100644 index 0000000..ee75472 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bb-reorder.h @@ -0,0 +1,40 @@ +/* Basic block reordering routines for the GNU compiler. + Copyright (C) 2000-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#ifndef GCC_BB_REORDER +#define GCC_BB_REORDER + +/* Target-specific globals. */ +struct target_bb_reorder { + /* Length of unconditional jump instruction. 
*/ + int x_uncond_jump_length; +}; + +extern struct target_bb_reorder default_target_bb_reorder; +#if SWITCHABLE_TARGET +extern struct target_bb_reorder *this_target_bb_reorder; +#else +#define this_target_bb_reorder (&default_target_bb_reorder) +#endif + +extern int get_uncond_jump_length (void); + +extern void insert_section_boundary_note (void); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bitmap.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bitmap.h new file mode 100644 index 0000000..7fba443 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bitmap.h @@ -0,0 +1,1089 @@ +/* Functions to support general ended bitmaps. + Copyright (C) 1997-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_BITMAP_H +#define GCC_BITMAP_H + +/* Implementation of sparse integer sets as a linked list or tree. + + This sparse set representation is suitable for sparse sets with an + unknown (a priori) universe. + + Sets are represented as double-linked lists of container nodes of + type "struct bitmap_element" or as a binary trees of the same + container nodes. Each container node consists of an index for the + first member that could be held in the container, a small array of + integers that represent the members in the container, and pointers + to the next and previous element in the linked list, or left and + right children in the tree. 
In linked-list form, the container + nodes in the list are sorted in ascending order, i.e. the head of + the list holds the element with the smallest member of the set. + In tree form, nodes to the left have a smaller container index. + + For a given member I in the set: + - the element for I will have index is I / (bits per element) + - the position for I within element is I % (bits per element) + + This representation is very space-efficient for large sparse sets, and + the size of the set can be changed dynamically without much overhead. + An important parameter is the number of bits per element. In this + implementation, there are 128 bits per element. This results in a + high storage overhead *per element*, but a small overall overhead if + the set is very sparse. + + The storage requirements for linked-list sparse sets are O(E), with E->N + in the worst case (a sparse set with large distances between the values + of the set members). + + This representation also works well for data flow problems where the size + of the set may grow dynamically, but care must be taken that the member_p, + add_member, and remove_member operations occur with a suitable access + pattern. + + The linked-list set representation works well for problems involving very + sparse sets. The canonical example in GCC is, of course, the "set of + sets" for some CFG-based data flow problems (liveness analysis, dominance + frontiers, etc.). + + For random-access sparse sets of unknown universe, the binary tree + representation is likely to be a more suitable choice. Theoretical + access times for the binary tree representation are better than those + for the linked-list, but in practice this is only true for truely + random access. + + Often the most suitable representation during construction of the set + is not the best choice for the usage of the set. For such cases, the + "view" of the set can be changed from one representation to the other. 
+ This is an O(E) operation: + + * from list to tree view : bitmap_tree_view + * from tree to list view : bitmap_list_view + + Traversing linked lists or trees can be cache-unfriendly. Performance + can be improved by keeping container nodes in the set grouped together + in memory, using a dedicated obstack for a set (or group of related + sets). Elements allocated on obstacks are released to a free-list and + taken off the free list. If multiple sets are allocated on the same + obstack, elements freed from one set may be re-used for one of the other + sets. This usually helps avoid cache misses. + + A single free-list is used for all sets allocated in GGC space. This is + bad for persistent sets, so persistent sets should be allocated on an + obstack whenever possible. + + For random-access sets with a known, relatively small universe size, the + SparseSet or simple bitmap representations may be more efficient than a + linked-list set. + + + LINKED LIST FORM + ================ + + In linked-list form, in-order iterations of the set can be executed + efficiently. The downside is that many random-access operations are + relatively slow, because the linked list has to be traversed to test + membership (i.e. member_p/ add_member/remove_member). + + To improve the performance of this set representation, the last + accessed element and its index are cached. For membership tests on + members close to recently accessed members, the cached last element + improves membership test to a constant-time operation. 
+ + The following operations can always be performed in O(1) time in + list view: + + * clear : bitmap_clear + * smallest_member : bitmap_first_set_bit + * choose_one : (not implemented, but could be + in constant time) + + The following operations can be performed in O(E) time worst-case in + list view (with E the number of elements in the linked list), but in + O(1) time with a suitable access patterns: + + * member_p : bitmap_bit_p + * add_member : bitmap_set_bit / bitmap_set_range + * remove_member : bitmap_clear_bit / bitmap_clear_range + + The following operations can be performed in O(E) time in list view: + + * cardinality : bitmap_count_bits + * largest_member : bitmap_last_set_bit (but this could + in constant time with a pointer to + the last element in the chain) + * set_size : bitmap_last_set_bit + + In tree view the following operations can all be performed in O(log E) + amortized time with O(E) worst-case behavior. + + * smallest_member + * largest_member + * set_size + * member_p + * add_member + * remove_member + + Additionally, the linked-list sparse set representation supports + enumeration of the members in O(E) time: + + * forall : EXECUTE_IF_SET_IN_BITMAP + * set_copy : bitmap_copy + * set_intersection : bitmap_intersect_p / + bitmap_and / bitmap_and_into / + EXECUTE_IF_AND_IN_BITMAP + * set_union : bitmap_ior / bitmap_ior_into + * set_difference : bitmap_intersect_compl_p / + bitmap_and_comp / bitmap_and_comp_into / + EXECUTE_IF_AND_COMPL_IN_BITMAP + * set_disjuction : bitmap_xor_comp / bitmap_xor_comp_into + * set_compare : bitmap_equal_p + + Some operations on 3 sets that occur frequently in data flow problems + are also implemented: + + * A | (B & C) : bitmap_ior_and_into + * A | (B & ~C) : bitmap_ior_and_compl / + bitmap_ior_and_compl_into + + + BINARY TREE FORM + ================ + An alternate "view" of a bitmap is its binary tree representation. 
+ For this representation, splay trees are used because they can be + implemented using the same data structures as the linked list, with + no overhead for meta-data (like color, or rank) on the tree nodes. + + In binary tree form, random-access to the set is much more efficient + than for the linked-list representation. Downsides are the high cost + of clearing the set, and the relatively large number of operations + necessary to balance the tree. Also, iterating the set members is + not supported. + + As for the linked-list representation, the last accessed element and + its index are cached, so that membership tests on the latest accessed + members is a constant-time operation. Other lookups take O(logE) + time amortized (but O(E) time worst-case). + + The following operations can always be performed in O(1) time: + + * choose_one : (not implemented, but could be + implemented in constant time) + + The following operations can be performed in O(logE) time amortized + but O(E) time worst-case, but in O(1) time if the same element is + accessed. + + * member_p : bitmap_bit_p + * add_member : bitmap_set_bit + * remove_member : bitmap_clear_bit + + The following operations can be performed in O(logE) time amortized + but O(E) time worst-case: + + * smallest_member : bitmap_first_set_bit + * largest_member : bitmap_last_set_bit + * set_size : bitmap_last_set_bit + + The following operations can be performed in O(E) time: + + * clear : bitmap_clear + + The binary tree sparse set representation does *not* support any form + of enumeration, and does also *not* support logical operations on sets. + The binary tree representation is only supposed to be used for sets + on which many random-access membership tests will happen. */ + +#include "obstack.h" +#include "array-traits.h" + +/* Bitmap memory usage. */ +class bitmap_usage: public mem_usage +{ +public: + /* Default contructor. */ + bitmap_usage (): m_nsearches (0), m_search_iter (0) {} + /* Constructor. 
*/ + bitmap_usage (size_t allocated, size_t times, size_t peak, + uint64_t nsearches, uint64_t search_iter) + : mem_usage (allocated, times, peak), + m_nsearches (nsearches), m_search_iter (search_iter) {} + + /* Sum the usage with SECOND usage. */ + bitmap_usage + operator+ (const bitmap_usage &second) + { + return bitmap_usage (m_allocated + second.m_allocated, + m_times + second.m_times, + m_peak + second.m_peak, + m_nsearches + second.m_nsearches, + m_search_iter + second.m_search_iter); + } + + /* Dump usage coupled to LOC location, where TOTAL is sum of all rows. */ + inline void + dump (mem_location *loc, const mem_usage &total) const + { + char *location_string = loc->to_string (); + + fprintf (stderr, "%-48s " PRsa (9) ":%5.1f%%" + PRsa (9) PRsa (9) ":%5.1f%%" + PRsa (11) PRsa (11) "%10s\n", + location_string, SIZE_AMOUNT (m_allocated), + get_percent (m_allocated, total.m_allocated), + SIZE_AMOUNT (m_peak), SIZE_AMOUNT (m_times), + get_percent (m_times, total.m_times), + SIZE_AMOUNT (m_nsearches), SIZE_AMOUNT (m_search_iter), + loc->m_ggc ? "ggc" : "heap"); + + free (location_string); + } + + /* Dump header with NAME. */ + static inline void + dump_header (const char *name) + { + fprintf (stderr, "%-48s %11s%16s%17s%12s%12s%10s\n", name, "Leak", "Peak", + "Times", "N searches", "Search iter", "Type"); + } + + /* Number search operations. */ + uint64_t m_nsearches; + /* Number of search iterations. */ + uint64_t m_search_iter; +}; + +/* Bitmap memory description. */ +extern mem_alloc_description bitmap_mem_desc; + +/* Fundamental storage type for bitmap. */ + +typedef unsigned long BITMAP_WORD; +/* BITMAP_WORD_BITS needs to be unsigned, but cannot contain casts as + it is used in preprocessor directives -- hence the 1u. */ +#define BITMAP_WORD_BITS (CHAR_BIT * SIZEOF_LONG * 1u) + +/* Number of words to use for each element in the linked list. 
*/ + +#ifndef BITMAP_ELEMENT_WORDS +#define BITMAP_ELEMENT_WORDS ((128 + BITMAP_WORD_BITS - 1) / BITMAP_WORD_BITS) +#endif + +/* Number of bits in each actual element of a bitmap. */ + +#define BITMAP_ELEMENT_ALL_BITS (BITMAP_ELEMENT_WORDS * BITMAP_WORD_BITS) + +/* Obstack for allocating bitmaps and elements from. */ +struct bitmap_obstack { + struct bitmap_element *elements; + bitmap_head *heads; + struct obstack obstack; +}; + +/* Bitmap set element. We use a linked list to hold only the bits that + are set. This allows for use to grow the bitset dynamically without + having to realloc and copy a giant bit array. + + The free list is implemented as a list of lists. There is one + outer list connected together by prev fields. Each element of that + outer is an inner list (that may consist only of the outer list + element) that are connected by the next fields. The prev pointer + is undefined for interior elements. This allows + bitmap_elt_clear_from to be implemented in unit time rather than + linear in the number of elements to be freed. */ + +struct GTY((chain_next ("%h.next"))) bitmap_element { + /* In list form, the next element in the linked list; + in tree form, the left child node in the tree. */ + struct bitmap_element *next; + /* In list form, the previous element in the linked list; + in tree form, the right child node in the tree. */ + struct bitmap_element *prev; + /* regno/BITMAP_ELEMENT_ALL_BITS. */ + unsigned int indx; + /* Bits that are set, counting from INDX, inclusive */ + BITMAP_WORD bits[BITMAP_ELEMENT_WORDS]; +}; + +/* Head of bitmap linked list. The 'current' member points to something + already pointed to by the chain started by first, so GTY((skip)) it. */ + +class GTY(()) bitmap_head { +public: + static bitmap_obstack crashme; + /* Poison obstack to not make it not a valid initialized GC bitmap. 
*/ + CONSTEXPR bitmap_head() + : indx (0), tree_form (false), padding (0), alloc_descriptor (0), first (NULL), + current (NULL), obstack (&crashme) + {} + /* Index of last element looked at. */ + unsigned int indx; + /* False if the bitmap is in list form; true if the bitmap is in tree form. + Bitmap iterators only work on bitmaps in list form. */ + unsigned tree_form: 1; + /* Next integer is shifted, so padding is needed. */ + unsigned padding: 2; + /* Bitmap UID used for memory allocation statistics. */ + unsigned alloc_descriptor: 29; + /* In list form, the first element in the linked list; + in tree form, the root of the tree. */ + bitmap_element *first; + /* Last element looked at. */ + bitmap_element * GTY((skip(""))) current; + /* Obstack to allocate elements from. If NULL, then use GGC allocation. */ + bitmap_obstack * GTY((skip(""))) obstack; + + /* Dump bitmap. */ + void dump (); + + /* Get bitmap descriptor UID casted to an unsigned integer pointer. + Shift the descriptor because pointer_hash::hash is + doing >> 3 shift operation. */ + unsigned *get_descriptor () + { + return (unsigned *)(ptrdiff_t)(alloc_descriptor << 3); + } +}; + +/* Global data */ +extern bitmap_element bitmap_zero_bits; /* Zero bitmap element */ +extern bitmap_obstack bitmap_default_obstack; /* Default bitmap obstack */ + +/* Change the view of the bitmap to list, or tree. */ +void bitmap_list_view (bitmap); +void bitmap_tree_view (bitmap); + +/* Clear a bitmap by freeing up the linked list. */ +extern void bitmap_clear (bitmap); + +/* Copy a bitmap to another bitmap. */ +extern void bitmap_copy (bitmap, const_bitmap); + +/* Move a bitmap to another bitmap. */ +extern void bitmap_move (bitmap, bitmap); + +/* True if two bitmaps are identical. */ +extern bool bitmap_equal_p (const_bitmap, const_bitmap); + +/* True if the bitmaps intersect (their AND is non-empty). 
*/ +extern bool bitmap_intersect_p (const_bitmap, const_bitmap); + +/* True if the complement of the second intersects the first (their + AND_COMPL is non-empty). */ +extern bool bitmap_intersect_compl_p (const_bitmap, const_bitmap); + +/* True if MAP is an empty bitmap. */ +inline bool bitmap_empty_p (const_bitmap map) +{ + return !map->first; +} + +/* True if the bitmap has only a single bit set. */ +extern bool bitmap_single_bit_set_p (const_bitmap); + +/* Count the number of bits set in the bitmap. */ +extern unsigned long bitmap_count_bits (const_bitmap); + +/* Count the number of unique bits set across the two bitmaps. */ +extern unsigned long bitmap_count_unique_bits (const_bitmap, const_bitmap); + +/* Boolean operations on bitmaps. The _into variants are two operand + versions that modify the first source operand. The other variants + are three operand versions that to not destroy the source bitmaps. + The operations supported are &, & ~, |, ^. */ +extern void bitmap_and (bitmap, const_bitmap, const_bitmap); +extern bool bitmap_and_into (bitmap, const_bitmap); +extern bool bitmap_and_compl (bitmap, const_bitmap, const_bitmap); +extern bool bitmap_and_compl_into (bitmap, const_bitmap); +#define bitmap_compl_and(DST, A, B) bitmap_and_compl (DST, B, A) +extern void bitmap_compl_and_into (bitmap, const_bitmap); +extern void bitmap_clear_range (bitmap, unsigned int, unsigned int); +extern void bitmap_set_range (bitmap, unsigned int, unsigned int); +extern bool bitmap_ior (bitmap, const_bitmap, const_bitmap); +extern bool bitmap_ior_into (bitmap, const_bitmap); +extern bool bitmap_ior_into_and_free (bitmap, bitmap *); +extern void bitmap_xor (bitmap, const_bitmap, const_bitmap); +extern void bitmap_xor_into (bitmap, const_bitmap); + +/* DST = A | (B & C). Return true if DST changes. */ +extern bool bitmap_ior_and_into (bitmap DST, const_bitmap B, const_bitmap C); +/* DST = A | (B & ~C). Return true if DST changes. 
*/ +extern bool bitmap_ior_and_compl (bitmap DST, const_bitmap A, + const_bitmap B, const_bitmap C); +/* A |= (B & ~C). Return true if A changes. */ +extern bool bitmap_ior_and_compl_into (bitmap A, + const_bitmap B, const_bitmap C); + +/* Clear a single bit in a bitmap. Return true if the bit changed. */ +extern bool bitmap_clear_bit (bitmap, int); + +/* Set a single bit in a bitmap. Return true if the bit changed. */ +extern bool bitmap_set_bit (bitmap, int); + +/* Return true if a bit is set in a bitmap. */ +extern bool bitmap_bit_p (const_bitmap, int); + +/* Set and get multiple bit values in a sparse bitmap. This allows a bitmap to + function as a sparse array of bit patterns where the patterns are + multiples of power of 2. This is more efficient than performing this as + multiple individual operations. */ +void bitmap_set_aligned_chunk (bitmap, unsigned int, unsigned int, BITMAP_WORD); +BITMAP_WORD bitmap_get_aligned_chunk (const_bitmap, unsigned int, unsigned int); + +/* Debug functions to print a bitmap. */ +extern void debug_bitmap (const_bitmap); +extern void debug_bitmap_file (FILE *, const_bitmap); + +/* Print a bitmap. */ +extern void bitmap_print (FILE *, const_bitmap, const char *, const char *); + +/* Initialize and release a bitmap obstack. */ +extern void bitmap_obstack_initialize (bitmap_obstack *); +extern void bitmap_obstack_release (bitmap_obstack *); +extern void bitmap_register (bitmap MEM_STAT_DECL); +extern void dump_bitmap_statistics (void); + +/* Initialize a bitmap header. OBSTACK indicates the bitmap obstack + to allocate from, NULL for GC'd bitmap. */ + +static inline void +bitmap_initialize (bitmap head, bitmap_obstack *obstack CXX_MEM_STAT_INFO) +{ + head->first = head->current = NULL; + head->indx = head->tree_form = 0; + head->padding = 0; + head->alloc_descriptor = 0; + head->obstack = obstack; + if (GATHER_STATISTICS) + bitmap_register (head PASS_MEM_STAT); +} + +/* Release a bitmap (but not its head). 
This is suitable for pairing with + bitmap_initialize. */ + +static inline void +bitmap_release (bitmap head) +{ + bitmap_clear (head); + /* Poison the obstack pointer so the obstack can be safely released. + Do not zero it as the bitmap then becomes initialized GC. */ + head->obstack = &bitmap_head::crashme; +} + +/* Allocate and free bitmaps from obstack, malloc and gc'd memory. */ +extern bitmap bitmap_alloc (bitmap_obstack *obstack CXX_MEM_STAT_INFO); +#define BITMAP_ALLOC bitmap_alloc +extern bitmap bitmap_gc_alloc (ALONE_CXX_MEM_STAT_INFO); +#define BITMAP_GGC_ALLOC bitmap_gc_alloc +extern void bitmap_obstack_free (bitmap); + +/* A few compatibility/functions macros for compatibility with sbitmaps */ +inline void dump_bitmap (FILE *file, const_bitmap map) +{ + bitmap_print (file, map, "", "\n"); +} +extern void debug (const bitmap_head &ref); +extern void debug (const bitmap_head *ptr); + +extern unsigned bitmap_first_set_bit (const_bitmap); +extern unsigned bitmap_last_set_bit (const_bitmap); + +/* Compute bitmap hash (for purposes of hashing etc.) */ +extern hashval_t bitmap_hash (const_bitmap); + +/* Do any cleanup needed on a bitmap when it is no longer used. */ +#define BITMAP_FREE(BITMAP) \ + ((void) (bitmap_obstack_free ((bitmap) BITMAP), (BITMAP) = (bitmap) NULL)) + +/* Iterator for bitmaps. */ + +struct bitmap_iterator +{ + /* Pointer to the current bitmap element. */ + bitmap_element *elt1; + + /* Pointer to 2nd bitmap element when two are involved. */ + bitmap_element *elt2; + + /* Word within the current element. */ + unsigned word_no; + + /* Contents of the actually processed word. When finding next bit + it is shifted right, so that the actual bit is always the least + significant bit of ACTUAL. */ + BITMAP_WORD bits; +}; + +/* Initialize a single bitmap iterator. START_BIT is the first bit to + iterate from. 
*/ + +static inline void +bmp_iter_set_init (bitmap_iterator *bi, const_bitmap map, + unsigned start_bit, unsigned *bit_no) +{ + bi->elt1 = map->first; + bi->elt2 = NULL; + + gcc_checking_assert (!map->tree_form); + + /* Advance elt1 until it is not before the block containing start_bit. */ + while (1) + { + if (!bi->elt1) + { + bi->elt1 = &bitmap_zero_bits; + break; + } + + if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS) + break; + bi->elt1 = bi->elt1->next; + } + + /* We might have gone past the start bit, so reinitialize it. */ + if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS) + start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS; + + /* Initialize for what is now start_bit. */ + bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; + bi->bits = bi->elt1->bits[bi->word_no]; + bi->bits >>= start_bit % BITMAP_WORD_BITS; + + /* If this word is zero, we must make sure we're not pointing at the + first bit, otherwise our incrementing to the next word boundary + will fail. It won't matter if this increment moves us into the + next word. */ + start_bit += !bi->bits; + + *bit_no = start_bit; +} + +/* Initialize an iterator to iterate over the intersection of two + bitmaps. START_BIT is the bit to commence from. */ + +static inline void +bmp_iter_and_init (bitmap_iterator *bi, const_bitmap map1, const_bitmap map2, + unsigned start_bit, unsigned *bit_no) +{ + bi->elt1 = map1->first; + bi->elt2 = map2->first; + + gcc_checking_assert (!map1->tree_form && !map2->tree_form); + + /* Advance elt1 until it is not before the block containing + start_bit. */ + while (1) + { + if (!bi->elt1) + { + bi->elt2 = NULL; + break; + } + + if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS) + break; + bi->elt1 = bi->elt1->next; + } + + /* Advance elt2 until it is not before elt1. 
*/ + while (1) + { + if (!bi->elt2) + { + bi->elt1 = bi->elt2 = &bitmap_zero_bits; + break; + } + + if (bi->elt2->indx >= bi->elt1->indx) + break; + bi->elt2 = bi->elt2->next; + } + + /* If we're at the same index, then we have some intersecting bits. */ + if (bi->elt1->indx == bi->elt2->indx) + { + /* We might have advanced beyond the start_bit, so reinitialize + for that. */ + if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS) + start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS; + + bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; + bi->bits = bi->elt1->bits[bi->word_no] & bi->elt2->bits[bi->word_no]; + bi->bits >>= start_bit % BITMAP_WORD_BITS; + } + else + { + /* Otherwise we must immediately advance elt1, so initialize for + that. */ + bi->word_no = BITMAP_ELEMENT_WORDS - 1; + bi->bits = 0; + } + + /* If this word is zero, we must make sure we're not pointing at the + first bit, otherwise our incrementing to the next word boundary + will fail. It won't matter if this increment moves us into the + next word. */ + start_bit += !bi->bits; + + *bit_no = start_bit; +} + +/* Initialize an iterator to iterate over the bits in MAP1 & ~MAP2. */ + +static inline void +bmp_iter_and_compl_init (bitmap_iterator *bi, + const_bitmap map1, const_bitmap map2, + unsigned start_bit, unsigned *bit_no) +{ + bi->elt1 = map1->first; + bi->elt2 = map2->first; + + gcc_checking_assert (!map1->tree_form && !map2->tree_form); + + /* Advance elt1 until it is not before the block containing start_bit. */ + while (1) + { + if (!bi->elt1) + { + bi->elt1 = &bitmap_zero_bits; + break; + } + + if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS) + break; + bi->elt1 = bi->elt1->next; + } + + /* Advance elt2 until it is not before elt1. */ + while (bi->elt2 && bi->elt2->indx < bi->elt1->indx) + bi->elt2 = bi->elt2->next; + + /* We might have advanced beyond the start_bit, so reinitialize for + that. 
*/ + if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS) + start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS; + + bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; + bi->bits = bi->elt1->bits[bi->word_no]; + if (bi->elt2 && bi->elt1->indx == bi->elt2->indx) + bi->bits &= ~bi->elt2->bits[bi->word_no]; + bi->bits >>= start_bit % BITMAP_WORD_BITS; + + /* If this word is zero, we must make sure we're not pointing at the + first bit, otherwise our incrementing to the next word boundary + will fail. It won't matter if this increment moves us into the + next word. */ + start_bit += !bi->bits; + + *bit_no = start_bit; +} + +/* Advance to the next bit in BI. We don't advance to the next + nonzero bit yet. */ + +static inline void +bmp_iter_next (bitmap_iterator *bi, unsigned *bit_no) +{ + bi->bits >>= 1; + *bit_no += 1; +} + +/* Advance to first set bit in BI. */ + +static inline void +bmp_iter_next_bit (bitmap_iterator * bi, unsigned *bit_no) +{ +#if (GCC_VERSION >= 3004) + { + unsigned int n = __builtin_ctzl (bi->bits); + gcc_assert (sizeof (unsigned long) == sizeof (BITMAP_WORD)); + bi->bits >>= n; + *bit_no += n; + } +#else + while (!(bi->bits & 1)) + { + bi->bits >>= 1; + *bit_no += 1; + } +#endif +} + +/* Advance to the next nonzero bit of a single bitmap, we will have + already advanced past the just iterated bit. Return true if there + is a bit to iterate. */ + +static inline bool +bmp_iter_set (bitmap_iterator *bi, unsigned *bit_no) +{ + /* If our current word is nonzero, it contains the bit we want. */ + if (bi->bits) + { + next_bit: + bmp_iter_next_bit (bi, bit_no); + return true; + } + + /* Round up to the word boundary. We might have just iterated past + the end of the last word, hence the -1. It is not possible for + bit_no to point at the beginning of the now last word. 
*/ + *bit_no = ((*bit_no + BITMAP_WORD_BITS - 1) + / BITMAP_WORD_BITS * BITMAP_WORD_BITS); + bi->word_no++; + + while (1) + { + /* Find the next nonzero word in this elt. */ + while (bi->word_no != BITMAP_ELEMENT_WORDS) + { + bi->bits = bi->elt1->bits[bi->word_no]; + if (bi->bits) + goto next_bit; + *bit_no += BITMAP_WORD_BITS; + bi->word_no++; + } + + /* Make sure we didn't remove the element while iterating. */ + gcc_checking_assert (bi->elt1->indx != -1U); + + /* Advance to the next element. */ + bi->elt1 = bi->elt1->next; + if (!bi->elt1) + return false; + *bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS; + bi->word_no = 0; + } +} + +/* Advance to the next nonzero bit of an intersecting pair of + bitmaps. We will have already advanced past the just iterated bit. + Return true if there is a bit to iterate. */ + +static inline bool +bmp_iter_and (bitmap_iterator *bi, unsigned *bit_no) +{ + /* If our current word is nonzero, it contains the bit we want. */ + if (bi->bits) + { + next_bit: + bmp_iter_next_bit (bi, bit_no); + return true; + } + + /* Round up to the word boundary. We might have just iterated past + the end of the last word, hence the -1. It is not possible for + bit_no to point at the beginning of the now last word. */ + *bit_no = ((*bit_no + BITMAP_WORD_BITS - 1) + / BITMAP_WORD_BITS * BITMAP_WORD_BITS); + bi->word_no++; + + while (1) + { + /* Find the next nonzero word in this elt. */ + while (bi->word_no != BITMAP_ELEMENT_WORDS) + { + bi->bits = bi->elt1->bits[bi->word_no] & bi->elt2->bits[bi->word_no]; + if (bi->bits) + goto next_bit; + *bit_no += BITMAP_WORD_BITS; + bi->word_no++; + } + + /* Advance to the next identical element. */ + do + { + /* Make sure we didn't remove the element while iterating. */ + gcc_checking_assert (bi->elt1->indx != -1U); + + /* Advance elt1 while it is less than elt2. We always want + to advance one elt. 
*/ + do + { + bi->elt1 = bi->elt1->next; + if (!bi->elt1) + return false; + } + while (bi->elt1->indx < bi->elt2->indx); + + /* Make sure we didn't remove the element while iterating. */ + gcc_checking_assert (bi->elt2->indx != -1U); + + /* Advance elt2 to be no less than elt1. This might not + advance. */ + while (bi->elt2->indx < bi->elt1->indx) + { + bi->elt2 = bi->elt2->next; + if (!bi->elt2) + return false; + } + } + while (bi->elt1->indx != bi->elt2->indx); + + *bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS; + bi->word_no = 0; + } +} + +/* Advance to the next nonzero bit in the intersection of + complemented bitmaps. We will have already advanced past the just + iterated bit. */ + +static inline bool +bmp_iter_and_compl (bitmap_iterator *bi, unsigned *bit_no) +{ + /* If our current word is nonzero, it contains the bit we want. */ + if (bi->bits) + { + next_bit: + bmp_iter_next_bit (bi, bit_no); + return true; + } + + /* Round up to the word boundary. We might have just iterated past + the end of the last word, hence the -1. It is not possible for + bit_no to point at the beginning of the now last word. */ + *bit_no = ((*bit_no + BITMAP_WORD_BITS - 1) + / BITMAP_WORD_BITS * BITMAP_WORD_BITS); + bi->word_no++; + + while (1) + { + /* Find the next nonzero word in this elt. */ + while (bi->word_no != BITMAP_ELEMENT_WORDS) + { + bi->bits = bi->elt1->bits[bi->word_no]; + if (bi->elt2 && bi->elt2->indx == bi->elt1->indx) + bi->bits &= ~bi->elt2->bits[bi->word_no]; + if (bi->bits) + goto next_bit; + *bit_no += BITMAP_WORD_BITS; + bi->word_no++; + } + + /* Make sure we didn't remove the element while iterating. */ + gcc_checking_assert (bi->elt1->indx != -1U); + + /* Advance to the next element of elt1. */ + bi->elt1 = bi->elt1->next; + if (!bi->elt1) + return false; + + /* Make sure we didn't remove the element while iterating. */ + gcc_checking_assert (! bi->elt2 || bi->elt2->indx != -1U); + + /* Advance elt2 until it is no less than elt1. 
*/ + while (bi->elt2 && bi->elt2->indx < bi->elt1->indx) + bi->elt2 = bi->elt2->next; + + *bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS; + bi->word_no = 0; + } +} + +/* If you are modifying a bitmap you are currently iterating over you + have to ensure to + - never remove the current bit; + - if you set or clear a bit before the current bit this operation + will not affect the set of bits you are visiting during the iteration; + - if you set or clear a bit after the current bit it is unspecified + whether that affects the set of bits you are visiting during the + iteration. + If you want to remove the current bit you can delay this to the next + iteration (and after the iteration in case the last iteration is + affected). */ + +/* Loop over all bits set in BITMAP, starting with MIN and setting + BITNUM to the bit number. ITER is a bitmap iterator. BITNUM + should be treated as a read-only variable as it contains loop + state. */ + +#ifndef EXECUTE_IF_SET_IN_BITMAP +/* See sbitmap.h for the other definition of EXECUTE_IF_SET_IN_BITMAP. */ +#define EXECUTE_IF_SET_IN_BITMAP(BITMAP, MIN, BITNUM, ITER) \ + for (bmp_iter_set_init (&(ITER), (BITMAP), (MIN), &(BITNUM)); \ + bmp_iter_set (&(ITER), &(BITNUM)); \ + bmp_iter_next (&(ITER), &(BITNUM))) +#endif + +/* Loop over all the bits set in BITMAP1 & BITMAP2, starting with MIN + and setting BITNUM to the bit number. ITER is a bitmap iterator. + BITNUM should be treated as a read-only variable as it contains + loop state. */ + +#define EXECUTE_IF_AND_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, ITER) \ + for (bmp_iter_and_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \ + &(BITNUM)); \ + bmp_iter_and (&(ITER), &(BITNUM)); \ + bmp_iter_next (&(ITER), &(BITNUM))) + +/* Loop over all the bits set in BITMAP1 & ~BITMAP2, starting with MIN + and setting BITNUM to the bit number. ITER is a bitmap iterator. + BITNUM should be treated as a read-only variable as it contains + loop state. 
*/ + +#define EXECUTE_IF_AND_COMPL_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, ITER) \ + for (bmp_iter_and_compl_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \ + &(BITNUM)); \ + bmp_iter_and_compl (&(ITER), &(BITNUM)); \ + bmp_iter_next (&(ITER), &(BITNUM))) + +/* A class that ties the lifetime of a bitmap to its scope. */ +class auto_bitmap +{ + public: + auto_bitmap (ALONE_CXX_MEM_STAT_INFO) + { bitmap_initialize (&m_bits, &bitmap_default_obstack PASS_MEM_STAT); } + explicit auto_bitmap (bitmap_obstack *o CXX_MEM_STAT_INFO) + { bitmap_initialize (&m_bits, o PASS_MEM_STAT); } + ~auto_bitmap () { bitmap_clear (&m_bits); } + // Allow calling bitmap functions on our bitmap. + operator bitmap () { return &m_bits; } + + private: + // Prevent making a copy that references our bitmap. + auto_bitmap (const auto_bitmap &); + auto_bitmap &operator = (const auto_bitmap &); + auto_bitmap (auto_bitmap &&); + auto_bitmap &operator = (auto_bitmap &&); + + bitmap_head m_bits; +}; + +extern void debug (const auto_bitmap &ref); +extern void debug (const auto_bitmap *ptr); + +/* Base class for bitmap_view; see there for details. */ +template > +class base_bitmap_view +{ +public: + typedef typename Traits::element_type array_element_type; + + base_bitmap_view (const T &, bitmap_element *); + operator const_bitmap () const { return &m_head; } + +private: + base_bitmap_view (const base_bitmap_view &); + + bitmap_head m_head; +}; + +/* Provides a read-only bitmap view of a single integer bitmask or a + constant-sized array of integer bitmasks, or of a wrapper around such + bitmasks. */ +template +class bitmap_view : public base_bitmap_view +{ +public: + bitmap_view (const T &array) + : base_bitmap_view (array, m_bitmap_elements) {} + +private: + /* How many bitmap_elements we need to hold a full T. 
*/ + static const size_t num_bitmap_elements + = CEIL (CHAR_BIT + * sizeof (typename Traits::element_type) + * Traits::constant_size, + BITMAP_ELEMENT_ALL_BITS); + bitmap_element m_bitmap_elements[num_bitmap_elements]; +}; + +/* Initialize the view for array ARRAY, using the array of bitmap + elements in BITMAP_ELEMENTS (which is known to contain enough + entries). */ +template +base_bitmap_view::base_bitmap_view (const T &array, + bitmap_element *bitmap_elements) +{ + m_head.obstack = NULL; + + /* The code currently assumes that each element of ARRAY corresponds + to exactly one bitmap_element. */ + const size_t array_element_bits = CHAR_BIT * sizeof (array_element_type); + STATIC_ASSERT (BITMAP_ELEMENT_ALL_BITS % array_element_bits == 0); + size_t array_step = BITMAP_ELEMENT_ALL_BITS / array_element_bits; + size_t array_size = Traits::size (array); + + /* Process each potential bitmap_element in turn. The loop is written + this way rather than per array element because usually there are + only a small number of array elements per bitmap element (typically + two or four). The inner loops should therefore unroll completely. */ + const array_element_type *array_elements = Traits::base (array); + unsigned int indx = 0; + for (size_t array_base = 0; + array_base < array_size; + array_base += array_step, indx += 1) + { + /* How many array elements are in this particular bitmap_element. */ + unsigned int array_count + = (STATIC_CONSTANT_P (array_size % array_step == 0) + ? array_step : MIN (array_step, array_size - array_base)); + + /* See whether we need this bitmap element. */ + array_element_type ior = array_elements[array_base]; + for (size_t i = 1; i < array_count; ++i) + ior |= array_elements[array_base + i]; + if (ior == 0) + continue; + + /* Grab the next bitmap element and chain it. 
*/ + bitmap_element *bitmap_element = bitmap_elements++; + if (m_head.current) + m_head.current->next = bitmap_element; + else + m_head.first = bitmap_element; + bitmap_element->prev = m_head.current; + bitmap_element->next = NULL; + bitmap_element->indx = indx; + m_head.current = bitmap_element; + m_head.indx = indx; + + /* Fill in the bits of the bitmap element. */ + if (array_element_bits < BITMAP_WORD_BITS) + { + /* Multiple array elements fit in one element of + bitmap_element->bits. */ + size_t array_i = array_base; + for (unsigned int word_i = 0; word_i < BITMAP_ELEMENT_WORDS; + ++word_i) + { + BITMAP_WORD word = 0; + for (unsigned int shift = 0; + shift < BITMAP_WORD_BITS && array_i < array_size; + shift += array_element_bits) + word |= array_elements[array_i++] << shift; + bitmap_element->bits[word_i] = word; + } + } + else + { + /* Array elements are the same size as elements of + bitmap_element->bits, or are an exact multiple of that size. */ + unsigned int word_i = 0; + for (unsigned int i = 0; i < array_count; ++i) + for (unsigned int shift = 0; shift < array_element_bits; + shift += BITMAP_WORD_BITS) + bitmap_element->bits[word_i++] + = array_elements[array_base + i] >> shift; + while (word_i < BITMAP_ELEMENT_WORDS) + bitmap_element->bits[word_i++] = 0; + } + } +} + +#endif /* GCC_BITMAP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtin-attrs.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtin-attrs.def new file mode 100644 index 0000000..f03dfec --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtin-attrs.def @@ -0,0 +1,427 @@ +/* Copyright (C) 2001-2022 Free Software Foundation, Inc. + Contributed by Joseph Myers . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* This header provides a declarative way of describing the attributes + that are applied to some built-in functions by default. Attributes + that are meant to be used by user-defined functions but aren't used + by any built-ins, or attributes that apply to types or variables + but not to functions need not and should not be defined here. + + Before including this header, you must define the following macros. + In each case where there is an ENUM, it is an identifier used to + reference the tree in subsequent definitions. + + DEF_ATTR_NULL_TREE (ENUM) + + Constructs a NULL_TREE. + + DEF_ATTR_INT (ENUM, VALUE) + + Constructs an INTEGER_CST with value VALUE (an integer representable + in HOST_WIDE_INT). + + DEF_ATTR_IDENT (ENUM, STRING) + + Constructs an IDENTIFIER_NODE for STRING. + + DEF_ATTR_TREE_LIST (ENUM, PURPOSE, VALUE, CHAIN) + + Constructs a TREE_LIST with given PURPOSE, VALUE and CHAIN (given + as previous ENUM names). */ + +DEF_ATTR_NULL_TREE (ATTR_NULL) + +/* Construct a tree for a given integer and a list containing it. */ +#define DEF_ATTR_FOR_INT(VALUE) \ + DEF_ATTR_INT (ATTR_##VALUE, VALUE) \ + DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE, ATTR_NULL, \ + ATTR_##VALUE, ATTR_NULL) +DEF_ATTR_FOR_INT (0) +DEF_ATTR_FOR_INT (1) +DEF_ATTR_FOR_INT (2) +DEF_ATTR_FOR_INT (3) +DEF_ATTR_FOR_INT (4) +DEF_ATTR_FOR_INT (5) +DEF_ATTR_FOR_INT (6) +#undef DEF_ATTR_FOR_INT + +/* Construct a tree for a given string and a list containing it. 
*/ +#define DEF_ATTR_FOR_STRING(ENUM, VALUE) \ + DEF_ATTR_STRING (ATTR_##ENUM, VALUE) \ + DEF_ATTR_TREE_LIST (ATTR_LIST_##ENUM, ATTR_NULL, \ + ATTR_##ENUM, ATTR_NULL) +DEF_ATTR_FOR_STRING (STR1, "1 ") +DEF_ATTR_FOR_STRING (STRERRNOC, ".C") +DEF_ATTR_FOR_STRING (STRERRNOP, ".P") +#undef DEF_ATTR_FOR_STRING + +/* Construct a tree for a list of two integers. */ +#define DEF_LIST_INT_INT(VALUE1, VALUE2) \ + DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE1##_##VALUE2, ATTR_NULL, \ + ATTR_##VALUE1, ATTR_LIST_##VALUE2) +DEF_LIST_INT_INT (1,0) +DEF_LIST_INT_INT (1,2) +DEF_LIST_INT_INT (1,3) +DEF_LIST_INT_INT (1,4) +DEF_LIST_INT_INT (1,5) +DEF_LIST_INT_INT (2,0) +DEF_LIST_INT_INT (2,3) +DEF_LIST_INT_INT (3,0) +DEF_LIST_INT_INT (3,4) +DEF_LIST_INT_INT (4,0) +DEF_LIST_INT_INT (4,5) +DEF_LIST_INT_INT (5,0) +DEF_LIST_INT_INT (5,6) +#undef DEF_LIST_INT_INT + +/* Construct trees for identifiers used in built-in function attributes. + The construction contributes to startup costs so only attributes that + are used to define built-ins should be defined here. 
*/ +DEF_ATTR_IDENT (ATTR_ALLOC_SIZE, "alloc_size") +DEF_ATTR_IDENT (ATTR_COLD, "cold") +DEF_ATTR_IDENT (ATTR_CONST, "const") +DEF_ATTR_IDENT (ATTR_FORMAT, "format") +DEF_ATTR_IDENT (ATTR_FORMAT_ARG, "format_arg") +DEF_ATTR_IDENT (ATTR_MALLOC, "malloc") +DEF_ATTR_IDENT (ATTR_NONNULL, "nonnull") +DEF_ATTR_IDENT (ATTR_NORETURN, "noreturn") +DEF_ATTR_IDENT (ATTR_NOTHROW, "nothrow") +DEF_ATTR_IDENT (ATTR_LEAF, "leaf") +DEF_ATTR_IDENT (ATTR_FNSPEC, "fn spec") +DEF_ATTR_IDENT (ATTR_PRINTF, "printf") +DEF_ATTR_IDENT (ATTR_ASM_FPRINTF, "asm_fprintf") +DEF_ATTR_IDENT (ATTR_GCC_DIAG, "gcc_diag") +DEF_ATTR_IDENT (ATTR_GCC_CDIAG, "gcc_cdiag") +DEF_ATTR_IDENT (ATTR_GCC_CXXDIAG, "gcc_cxxdiag") +DEF_ATTR_IDENT (ATTR_PURE, "pure") +DEF_ATTR_IDENT (ATTR_NOVOPS, "no vops") +DEF_ATTR_IDENT (ATTR_SCANF, "scanf") +DEF_ATTR_IDENT (ATTR_SENTINEL, "sentinel") +DEF_ATTR_IDENT (ATTR_STRFMON, "strfmon") +DEF_ATTR_IDENT (ATTR_STRFTIME, "strftime") +DEF_ATTR_IDENT (ATTR_TYPEGENERIC, "type generic") +DEF_ATTR_IDENT (ATTR_TM_REGPARM, "*tm regparm") +DEF_ATTR_IDENT (ATTR_TM_TMPURE, "transaction_pure") +DEF_ATTR_IDENT (ATTR_RETURNS_TWICE, "returns_twice") +DEF_ATTR_IDENT (ATTR_RETURNS_NONNULL, "returns_nonnull") +DEF_ATTR_IDENT (ATTR_WARN_UNUSED_RESULT, "warn_unused_result") + +DEF_ATTR_TREE_LIST (ATTR_NOVOPS_LIST, ATTR_NOVOPS, ATTR_NULL, ATTR_NULL) + +DEF_ATTR_TREE_LIST (ATTR_NOVOPS_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NOVOPS_LIST) + +DEF_ATTR_TREE_LIST (ATTR_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NULL) + +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_LIST, ATTR_NOTHROW, ATTR_NULL, ATTR_NULL) + +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NOTHROW_LIST) + +DEF_ATTR_TREE_LIST (ATTR_NOVOPS_NOTHROW_LEAF_LIST, ATTR_NOVOPS, \ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_LIST, ATTR_CONST, \ + ATTR_NULL, ATTR_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_LEAF_LIST, ATTR_CONST, \ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST 
(ATTR_ERRNOCONST_NOTHROW_LEAF_LIST, ATTR_FNSPEC,\ + ATTR_LIST_STRERRNOC, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_ERRNOPURE_NOTHROW_LEAF_LIST, ATTR_FNSPEC,\ + ATTR_LIST_STRERRNOP, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_LIST, ATTR_PURE, \ + ATTR_NULL, ATTR_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_LEAF_LIST, ATTR_PURE, \ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LIST, ATTR_NORETURN, \ + ATTR_NULL, ATTR_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LEAF_LIST, ATTR_NORETURN,\ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LEAF_COLD_LIST, ATTR_COLD,\ + ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_RT_NOTHROW_LEAF_LIST, ATTR_RETURNS_TWICE,\ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_COLD_NOTHROW_LEAF_LIST, ATTR_COLD,\ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST, ATTR_COLD,\ + ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST, ATTR_CONST,\ + ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_CONST_NORETURN_NOTHROW_LEAF_COLD_LIST, ATTR_COLD,\ + ATTR_NULL, ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_MALLOC_NOTHROW_LIST, ATTR_MALLOC, \ + ATTR_NULL, ATTR_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST, ATTR_WARN_UNUSED_RESULT, \ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST, ATTR_MALLOC, \ + ATTR_NULL, ATTR_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_SENTINEL_NOTHROW_LIST, ATTR_SENTINEL, \ + ATTR_NULL, ATTR_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_SENTINEL_NOTHROW_LEAF_LIST, ATTR_SENTINEL, \ + ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST, ATTR_CONST,\ + ATTR_NULL, ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST) + +/* 
Allocation functions like malloc and realloc whose first argument + with _SIZE_1, or second argument with _SIZE_2, specifies the size + of the allocated object. */ +DEF_ATTR_TREE_LIST (ATTR_MALLOC_SIZE_1_NOTHROW_LIST, ATTR_ALLOC_SIZE, \ + ATTR_LIST_1, ATTR_MALLOC_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LIST, ATTR_WARN_UNUSED_RESULT, \ + ATTR_NULL, ATTR_MALLOC_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LIST, ATTR_ALLOC_SIZE, \ + ATTR_LIST_2, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST, ATTR_ALLOC_SIZE, \ + ATTR_LIST_1, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST) +/* Alloca is just like malloc except that it never returns null. */ +DEF_ATTR_TREE_LIST (ATTR_ALLOCA_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST, ATTR_RETURNS_NONNULL, + ATTR_NULL, ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST) + +/* Allocation functions like calloc the product of whose first two arguments + specifies the size of the allocated object. */ +DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_2_NOTHROW_LEAF_LIST, ATTR_ALLOC_SIZE, \ + ATTR_LIST_1_2, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST) + +/* Allocation functions like realloc whose second argument specifies + the size of the allocated object. */ +DEF_ATTR_TREE_LIST (ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LEAF_LIST, ATTR_ALLOC_SIZE, \ + ATTR_LIST_2, ATTR_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST) + +/* Functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_LIST, ATTR_NONNULL, ATTR_NULL, ATTR_NULL) +/* Functions whose first parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_1, ATTR_NONNULL, ATTR_LIST_1, ATTR_NULL) +/* Functions whose second parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_2, ATTR_NONNULL, ATTR_LIST_2, ATTR_NULL) +/* Functions whose third parameter is a nonnull pointer. 
*/ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_3, ATTR_NONNULL, ATTR_LIST_3, ATTR_NULL) +/* Nothrow functions with the sentinel(1) attribute. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_SENTINEL_1, ATTR_SENTINEL, ATTR_LIST_1, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL, ATTR_NONNULL, ATTR_NULL, \ + ATTR_NOTHROW_LIST) +/* Nothrow leaf functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_LEAF, ATTR_NONNULL, ATTR_NULL, \ + ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NOTHROW_NONNULL_LEAF) +/* Nothrow functions whose first parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1, ATTR_NONNULL, ATTR_LIST_1, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions whose second parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_2, ATTR_NONNULL, ATTR_LIST_2, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions whose third parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_3, ATTR_NONNULL, ATTR_LIST_3, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions whose fourth parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_4, ATTR_NONNULL, ATTR_LIST_4, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions whose fifth parameter is a nonnull pointer. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_5, ATTR_NONNULL, ATTR_LIST_5, \ + ATTR_NOTHROW_LIST) + +/* Same as ATTR_NONNULL_1. */ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_1, ATTR_NONNULL, ATTR_LIST_1, ATTR_NULL) +/* Functions like {v,}fprintf whose first and second parameters are + nonnull pointers. As cancellation points the functions are not + nothrow. */ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_2, ATTR_NONNULL, ATTR_LIST_1_2, ATTR_NULL) +/* The following don't have {v,}fprintf forms. They exist only to + make it possible to declare {v,}{f,s}printf attributes using + the same macro. 
*/ +DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_3, ATTR_NONNULL, ATTR_LIST_1_3, ATTR_NULL) +DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_1_4, ATTR_NULL) +DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_5, ATTR_NONNULL, ATTR_LIST_1_5, ATTR_NULL) + +/* Same as ATTR_NOTHROW_NONNULL_1. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_1, ATTR_NONNULL, ATTR_LIST_1, + ATTR_NOTHROW_LIST) +/* Nothrow functions like {v,}sprintf whose first and second parameters + are nonnull pointers. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_2, ATTR_NONNULL, ATTR_LIST_1_2, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions like {v,}snprintf whose first and third parameters + are nonnull pointers. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_3, ATTR_NONNULL, ATTR_LIST_1_3, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions like {v,}sprintf_chk whose first and fourth parameters + are nonnull pointers. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_1_4, \ + ATTR_NOTHROW_LIST) +/* Nothrow functions like {v,}snprintf_chk whose first and fifth parameters + are nonnull pointers. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_5, ATTR_NONNULL, ATTR_LIST_1_5, \ + ATTR_NOTHROW_LIST) + +/* Nothrow leaf functions which are type-generic. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_TYPEGENERIC_LEAF, ATTR_TYPEGENERIC, ATTR_NULL, \ + ATTR_NOTHROW_LEAF_LIST) +/* Nothrow nonnull leaf functions that are type-generic. */ +DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF, + ATTR_TYPEGENERIC, ATTR_NULL, + ATTR_NOTHROW_NONNULL_LEAF) +/* Nothrow const functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_NONNULL, ATTR_CONST, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL) +/* Nothrow leaf functions whose pointer parameter(s) are all nonnull, + and which return their first argument. 
*/ +DEF_ATTR_TREE_LIST (ATTR_RET1_NOTHROW_NONNULL_LEAF, ATTR_FNSPEC, ATTR_LIST_STR1, \ + ATTR_NOTHROW_NONNULL_LEAF) +/* Nothrow leaf functions whose pointer parameter(s) are all nonnull, + and return value is also nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_RETNONNULL_NOTHROW_LEAF, ATTR_RETURNS_NONNULL, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL_LEAF) +/* Nothrow const leaf functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_NONNULL_LEAF, ATTR_CONST, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL_LEAF) +/* Nothrow const functions which are type-generic. */ +DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_TYPEGENERIC, ATTR_TYPEGENERIC, ATTR_NULL, \ + ATTR_CONST_NOTHROW_LIST) +/* Nothrow const leaf functions which are type-generic. */ +DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF, ATTR_TYPEGENERIC, ATTR_NULL, \ + ATTR_CONST_NOTHROW_LEAF_LIST) +/* Nothrow pure functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL, ATTR_PURE, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL) +/* Nothrow pure leaf functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL_LEAF, ATTR_PURE, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL_LEAF) +/* Nothrow malloc functions whose pointer parameter(s) are all nonnull. */ +DEF_ATTR_TREE_LIST (ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL, ATTR_WARN_UNUSED_RESULT, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL) +DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL, ATTR_MALLOC, ATTR_NULL, \ + ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL) +/* Nothrow malloc leaf functions whose pointer parameter(s) are all nonnull. 
*/ +DEF_ATTR_TREE_LIST (ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF, ATTR_WARN_UNUSED_RESULT, ATTR_NULL, \ + ATTR_NOTHROW_NONNULL_LEAF) +DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF, ATTR_MALLOC, ATTR_NULL, \ + ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF) + +/* Construct a tree for the format attribute (and implicitly nonnull). */ +#define DEF_FORMAT_ATTRIBUTE(TYPE, FA, VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \ + ATTR_##TYPE, ATTR_LIST_##VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_##VALUES, ATTR_FORMAT, \ + ATTR_##TYPE##_##VALUES, ATTR_NONNULL_##FA) + +/* Construct a tree for the format and nothrow attributes (format + implies nonnull). */ +#define DEF_FORMAT_ATTRIBUTE_NOTHROW(TYPE, FA, VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \ + ATTR_##TYPE, ATTR_LIST_##VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_NOTHROW_##VALUES, ATTR_FORMAT,\ + ATTR_##TYPE##_##VALUES, ATTR_NOTHROW_NONNULL_##FA) + +/* Construct one tree for the format attribute and another for the format + and nothrow attributes (in both cases format implies nonnull). */ +#define DEF_FORMAT_ATTRIBUTE_BOTH(TYPE, FA, VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \ + ATTR_##TYPE, ATTR_LIST_##VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_##VALUES, ATTR_FORMAT, \ + ATTR_##TYPE##_##VALUES, ATTR_NONNULL_##FA) \ + DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_NOTHROW_##VALUES, ATTR_FORMAT,\ + ATTR_##TYPE##_##VALUES, ATTR_NOTHROW_NONNULL_##FA) + +/* Construct a pair of trees for the nonnull attribute for the first + argument, plus format printf attribute (format implies nonnull): + the first ordinary and the second nothrow. 
*/ +#define DEF_FORMAT_ATTRIBUTE_NONNULL(TYPE, FA, VALUES) \ + DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_FORMAT_##TYPE##_##VALUES, \ + ATTR_FORMAT, ATTR_##TYPE##_##VALUES, \ + ATTR_NONNULL_1_##FA) \ + DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_FORMAT_##TYPE##_##VALUES, \ + ATTR_FORMAT, ATTR_##TYPE##_##VALUES, \ + ATTR_NOTHROW_NONNULL_1_##FA) + +DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_0) +DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_2) +DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,2,2_0) +DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,2,2_3) +DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,3,3_0) +DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,3,3_4) +DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,4,4_0) +DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,4,4_5) +DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,5,5_0) +DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,5,5_6) + +/* Attributes for fprintf(f, f, va). */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,1,1_2) +/* Attributes for v{f,s}printf(d, f, va). vsprintf is nothrow, vfprintf + is not. */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,2,2_0) +/* Attributes for {f,s}printf(d, f, ...). sprintf is nothrow, fprintf + is not. */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,2,2_3) +/* Attributes for vprintf_chk. */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,3,3_0) +/* Attributes for printf_chk. */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,3,3_4) +/* Attributes for v{f,s}printf_chk(d, t, bos, f, va). vsprintf_chk is + nothrow, vfprintf_chk is not. */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,4,4_0) +/* Attributes for {f,s}printf_chk(d, t, bos, f, ...). sprintf_chk is + nothrow, fprintf_chk is not. */ +DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,4,4_5) + +DEF_FORMAT_ATTRIBUTE(SCANF,1,1_0) +DEF_FORMAT_ATTRIBUTE(SCANF,1,1_2) +DEF_FORMAT_ATTRIBUTE_BOTH(SCANF,2,2_0) +DEF_FORMAT_ATTRIBUTE_BOTH(SCANF,2,2_3) +DEF_FORMAT_ATTRIBUTE_NOTHROW(STRFTIME,3,3_0) +DEF_FORMAT_ATTRIBUTE_NOTHROW(STRFMON,3,3_4) +#undef DEF_FORMAT_ATTRIBUTE +#undef DEF_FORMAT_ATTRIBUTE_NOTHROW +#undef DEF_FORMAT_ATTRIBUTE_BOTH + +/* Transactional memory variants of the above. 
*/ + +DEF_ATTR_TREE_LIST (ATTR_TM_NOTHROW_LIST, + ATTR_TM_REGPARM, ATTR_NULL, ATTR_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_TM_TMPURE_NOTHROW_LIST, + ATTR_TM_TMPURE, ATTR_NULL, ATTR_TM_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_TM_PURE_TMPURE_NOTHROW_LIST, + ATTR_PURE, ATTR_NULL, ATTR_TM_TMPURE_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_TM_NORETURN_NOTHROW_LIST, + ATTR_TM_REGPARM, ATTR_NULL, ATTR_NORETURN_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_TM_CONST_NOTHROW_LIST, + ATTR_TM_REGPARM, ATTR_NULL, ATTR_CONST_NOTHROW_LIST) +DEF_ATTR_TREE_LIST (ATTR_TM_NOTHROW_RT_LIST, + ATTR_RETURNS_TWICE, ATTR_NULL, ATTR_TM_NOTHROW_LIST) + +/* Same attributes used for BUILT_IN_MALLOC except with TM_PURE thrown in. */ +DEF_ATTR_TREE_LIST (ATTR_TMPURE_MALLOC_NOTHROW_LIST, + ATTR_TM_TMPURE, ATTR_NULL, ATTR_MALLOC_NOTHROW_LIST) +/* Same attributes used for BUILT_IN_FREE except with TM_PURE thrown in. */ +DEF_ATTR_TREE_LIST (ATTR_TMPURE_NOTHROW_LIST, + ATTR_TM_TMPURE, ATTR_NULL, ATTR_NOTHROW_LIST) + +DEF_ATTR_TREE_LIST (ATTR_TMPURE_NOTHROW_LEAF_LIST, + ATTR_TM_TMPURE, ATTR_NULL, ATTR_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST, + ATTR_TM_TMPURE, ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_ATTR_TREE_LIST (ATTR_TMPURE_NORETURN_NOTHROW_LEAF_COLD_LIST, + ATTR_COLD, ATTR_NULL, + ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST) + +/* Construct a tree for a format_arg attribute. */ +#define DEF_FORMAT_ARG_ATTRIBUTE(FA) \ + DEF_ATTR_TREE_LIST (ATTR_FORMAT_ARG_##FA, ATTR_FORMAT_ARG, \ + ATTR_LIST_##FA, ATTR_NOTHROW_NONNULL_##FA) +DEF_FORMAT_ARG_ATTRIBUTE(1) +DEF_FORMAT_ARG_ATTRIBUTE(2) +#undef DEF_FORMAT_ARG_ATTRIBUTE + diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtin-types.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtin-types.def new file mode 100644 index 0000000..3a7cecd --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtin-types.def @@ -0,0 +1,874 @@ +/* Copyright (C) 2001-2022 Free Software Foundation, Inc. 
+ +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* This header provides a declarative way of describing the types that + are used when declaring builtin functions. + + Before including this header, you must define the following macros: + + DEF_PRIMITIVE_TYPE (ENUM, TYPE) + + The ENUM is an identifier indicating which type is being defined. + TYPE is an expression for a `tree' that represents the type. + + DEF_FUNCTION_TYPE_0 (ENUM, RETURN) + DEF_FUNCTION_TYPE_1 (ENUM, RETURN, ARG1) + DEF_FUNCTION_TYPE_2 (ENUM, RETURN, ARG1, ARG2) + [...] + + These macros describe function types. ENUM is as above. The + RETURN type is one of the enumerals already defined. ARG1, ARG2, + etc, give the types of the arguments, similarly. + + DEF_FUNCTION_TYPE_VAR_0 (ENUM, RETURN) + DEF_FUNCTION_TYPE_VAR_1 (ENUM, RETURN, ARG1) + DEF_FUNCTION_TYPE_VAR_2 (ENUM, RETURN, ARG1, ARG2) + [...] + + Similar, but for function types that take variable arguments. + For example: + + DEF_FUNCTION_TYPE_1 (BT_INT_DOUBLE, BT_INT, BT_DOUBLE) + + describes the type `int ()(double)', using the enumeral + BT_INT_DOUBLE, whereas: + + DEF_FUNCTION_TYPE_VAR_1 (BT_INT_DOUBLE_VAR, BT_INT, BT_DOUBLE) + + describes the type `int ()(double, ...)'. + + DEF_POINTER_TYPE (ENUM, TYPE) + + This macro describes a pointer type. ENUM is as above; TYPE is + the type pointed to. 
*/ + +DEF_PRIMITIVE_TYPE (BT_VOID, void_type_node) +DEF_PRIMITIVE_TYPE (BT_BOOL, boolean_type_node) +DEF_PRIMITIVE_TYPE (BT_INT, integer_type_node) +DEF_PRIMITIVE_TYPE (BT_UINT, unsigned_type_node) +DEF_PRIMITIVE_TYPE (BT_LONG, long_integer_type_node) +DEF_PRIMITIVE_TYPE (BT_ULONG, long_unsigned_type_node) +DEF_PRIMITIVE_TYPE (BT_LONGLONG, long_long_integer_type_node) +DEF_PRIMITIVE_TYPE (BT_ULONGLONG, long_long_unsigned_type_node) +DEF_PRIMITIVE_TYPE (BT_INTMAX, intmax_type_node) +DEF_PRIMITIVE_TYPE (BT_UINTMAX, uintmax_type_node) +DEF_PRIMITIVE_TYPE (BT_INT8, signed_char_type_node) +DEF_PRIMITIVE_TYPE (BT_INT16, short_integer_type_node) +DEF_PRIMITIVE_TYPE (BT_UINT8, unsigned_char_type_node) +DEF_PRIMITIVE_TYPE (BT_UINT16, uint16_type_node) +DEF_PRIMITIVE_TYPE (BT_UINT32, uint32_type_node) +DEF_PRIMITIVE_TYPE (BT_UINT64, uint64_type_node) +DEF_PRIMITIVE_TYPE (BT_UINT128, uint128_type_node + ? uint128_type_node + : error_mark_node) +DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 1)) +DEF_PRIMITIVE_TYPE (BT_UNWINDWORD, (*lang_hooks.types.type_for_mode) + (targetm.unwind_word_mode (), 1)) +DEF_PRIMITIVE_TYPE (BT_FLOAT, float_type_node) +DEF_PRIMITIVE_TYPE (BT_DOUBLE, double_type_node) +DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE, long_double_type_node) +DEF_PRIMITIVE_TYPE (BT_FLOAT16, (float16_type_node + ? float16_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_FLOAT32, (float32_type_node + ? float32_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_FLOAT64, (float64_type_node + ? float64_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_FLOAT128, (float128_type_node + ? float128_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_FLOAT32X, (float32x_type_node + ? float32x_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_FLOAT64X, (float64x_type_node + ? float64x_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_FLOAT128X, (float128x_type_node + ? 
float128x_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT, complex_float_type_node) +DEF_PRIMITIVE_TYPE (BT_COMPLEX_DOUBLE, complex_double_type_node) +DEF_PRIMITIVE_TYPE (BT_COMPLEX_LONGDOUBLE, complex_long_double_type_node) + +DEF_PRIMITIVE_TYPE (BT_PTR, ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_FILEPTR, fileptr_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_TM_PTR, const_tm_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_FENV_T_PTR, fenv_t_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_FENV_T_PTR, const_fenv_t_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_FEXCEPT_T_PTR, fexcept_t_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_FEXCEPT_T_PTR, const_fexcept_t_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_PTR, const_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_VOLATILE_PTR, + build_pointer_type + (build_qualified_type (void_type_node, + TYPE_QUAL_VOLATILE))) +DEF_PRIMITIVE_TYPE (BT_CONST_VOLATILE_PTR, + build_pointer_type + (build_qualified_type (void_type_node, + TYPE_QUAL_VOLATILE|TYPE_QUAL_CONST))) +DEF_PRIMITIVE_TYPE (BT_PTRMODE, (*lang_hooks.types.type_for_mode)(ptr_mode, 0)) +DEF_PRIMITIVE_TYPE (BT_INT_PTR, integer_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_FLOAT_PTR, float_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_DOUBLE_PTR, double_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_DOUBLE_PTR, + build_pointer_type + (build_qualified_type (double_type_node, + TYPE_QUAL_CONST))) +DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE_PTR, long_double_ptr_type_node) +DEF_PRIMITIVE_TYPE (BT_PID, pid_type_node) +DEF_PRIMITIVE_TYPE (BT_SIZE, size_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_SIZE, + build_qualified_type (size_type_node, TYPE_QUAL_CONST)) +DEF_PRIMITIVE_TYPE (BT_SSIZE, signed_size_type_node) +DEF_PRIMITIVE_TYPE (BT_WINT, wint_type_node) +DEF_PRIMITIVE_TYPE (BT_STRING, string_type_node) +DEF_PRIMITIVE_TYPE (BT_CONST_STRING, const_string_type_node) + +DEF_PRIMITIVE_TYPE (BT_DFLOAT32, (dfloat32_type_node + ? 
dfloat32_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_DFLOAT64, (dfloat64_type_node + ? dfloat64_type_node + : error_mark_node)) +DEF_PRIMITIVE_TYPE (BT_DFLOAT128, (dfloat128_type_node + ? dfloat128_type_node + : error_mark_node)) + +DEF_PRIMITIVE_TYPE (BT_VALIST_REF, va_list_ref_type_node) +DEF_PRIMITIVE_TYPE (BT_VALIST_ARG, va_list_arg_type_node) + +DEF_PRIMITIVE_TYPE (BT_I1, builtin_type_for_size (BITS_PER_UNIT*1, 1)) +DEF_PRIMITIVE_TYPE (BT_I2, builtin_type_for_size (BITS_PER_UNIT*2, 1)) +DEF_PRIMITIVE_TYPE (BT_I4, builtin_type_for_size (BITS_PER_UNIT*4, 1)) +DEF_PRIMITIVE_TYPE (BT_I8, builtin_type_for_size (BITS_PER_UNIT*8, 1)) +DEF_PRIMITIVE_TYPE (BT_I16, builtin_type_for_size (BITS_PER_UNIT*16, 1)) + +/* The C type `char * const *'. */ +DEF_PRIMITIVE_TYPE (BT_PTR_CONST_STRING, + build_pointer_type + (build_qualified_type (string_type_node, + TYPE_QUAL_CONST))) + +DEF_POINTER_TYPE (BT_PTR_UINT, BT_UINT) +DEF_POINTER_TYPE (BT_PTR_LONG, BT_LONG) +DEF_POINTER_TYPE (BT_PTR_ULONG, BT_ULONG) +DEF_POINTER_TYPE (BT_PTR_LONGLONG, BT_LONGLONG) +DEF_POINTER_TYPE (BT_PTR_ULONGLONG, BT_ULONGLONG) +DEF_POINTER_TYPE (BT_PTR_PTR, BT_PTR) + +DEF_FUNCTION_TYPE_0 (BT_FN_VOID, BT_VOID) +DEF_FUNCTION_TYPE_0 (BT_FN_BOOL, BT_BOOL) +DEF_FUNCTION_TYPE_0 (BT_FN_PTR, BT_PTR) +DEF_FUNCTION_TYPE_0 (BT_FN_CONST_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_0 (BT_FN_PID, BT_PID) +DEF_FUNCTION_TYPE_0 (BT_FN_INT, BT_INT) +DEF_FUNCTION_TYPE_0 (BT_FN_UINT, BT_UINT) +DEF_FUNCTION_TYPE_0 (BT_FN_ULONG, BT_ULONG) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_0 (BT_FN_DOUBLE, BT_DOUBLE) +/* For "long double" we use LONGDOUBLE (not LONG_DOUBLE) to + distinguish it from two types in sequence, "long" followed by + "double". 
*/ +DEF_FUNCTION_TYPE_0 (BT_FN_LONGDOUBLE, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT16, BT_FLOAT16) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT32, BT_FLOAT32) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT64, BT_FLOAT64) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT128, BT_FLOAT128) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT32X, BT_FLOAT32X) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT64X, BT_FLOAT64X) +DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT128X, BT_FLOAT128X) +DEF_FUNCTION_TYPE_0 (BT_FN_DFLOAT32, BT_DFLOAT32) +DEF_FUNCTION_TYPE_0 (BT_FN_DFLOAT64, BT_DFLOAT64) +DEF_FUNCTION_TYPE_0 (BT_FN_DFLOAT128, BT_DFLOAT128) + +DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONG, BT_LONG, BT_LONG) +DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGLONG, BT_LONGLONG, BT_LONGLONG) +DEF_FUNCTION_TYPE_1 (BT_FN_INTMAX_INTMAX, BT_INTMAX, BT_INTMAX) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_LONGDOUBLE, + BT_LONGDOUBLE, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT16_FLOAT16, BT_FLOAT16, BT_FLOAT16) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32_FLOAT32, BT_FLOAT32, BT_FLOAT32) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64_FLOAT64, BT_FLOAT64, BT_FLOAT64) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128_FLOAT128, BT_FLOAT128, BT_FLOAT128) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32X_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64X_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128X_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X) +DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, + BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, + BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, + BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_COMPLEX_FLOAT, + BT_FLOAT, BT_COMPLEX_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_COMPLEX_DOUBLE, + BT_DOUBLE, BT_COMPLEX_DOUBLE) +DEF_FUNCTION_TYPE_1 
(BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, + BT_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_FLOAT, + BT_COMPLEX_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_DOUBLE, + BT_COMPLEX_DOUBLE, BT_DOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_LONGDOUBLE, + BT_COMPLEX_LONGDOUBLE, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_PTR_UINT, BT_PTR, BT_UINT) +DEF_FUNCTION_TYPE_1 (BT_FN_PTR_SIZE, BT_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_INT, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_UINT, BT_INT, BT_UINT) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONG, BT_INT, BT_LONG) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_ULONG, BT_INT, BT_ULONG) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGLONG, BT_INT, BT_LONGLONG) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_ULONGLONG, BT_INT, BT_ULONGLONG) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_INTMAX, BT_INT, BT_INTMAX) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_UINTMAX, BT_INT, BT_UINTMAX) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_PTR, BT_INT, BT_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT, BT_INT, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_DOUBLE, BT_INT, BT_DOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_DFLOAT32, BT_INT, BT_DFLOAT32) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_DFLOAT64, BT_INT, BT_DFLOAT64) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_DFLOAT128, BT_INT, BT_DFLOAT128) +DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT, BT_LONG, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_LONG_DOUBLE, BT_LONG, BT_DOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONGDOUBLE, BT_LONG, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT, BT_LONGLONG, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_DOUBLE, BT_LONGLONG, BT_DOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGDOUBLE, BT_LONGLONG, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_1 (BT_FN_VOID_PTR, BT_VOID, BT_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_SIZE_CONST_STRING, BT_SIZE, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_CONST_STRING, BT_INT, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_PTR_PTR, BT_PTR, BT_PTR) 
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VALIST_REF, BT_VOID, BT_VALIST_REF) +DEF_FUNCTION_TYPE_1 (BT_FN_VOID_INT, BT_VOID, BT_INT) +DEF_FUNCTION_TYPE_1 (BT_FN_VOID_BOOL, BT_VOID, BT_BOOL) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_CONST_STRING, BT_FLOAT, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_CONST_STRING, BT_DOUBLE, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_CONST_STRING, + BT_LONGDOUBLE, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT16_CONST_STRING, BT_FLOAT16, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32_CONST_STRING, BT_FLOAT32, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64_CONST_STRING, BT_FLOAT64, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128_CONST_STRING, BT_FLOAT128, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32X_CONST_STRING, BT_FLOAT32X, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64X_CONST_STRING, BT_FLOAT64X, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128X_CONST_STRING, BT_FLOAT128X, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT32_CONST_STRING, BT_DFLOAT32, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT64_CONST_STRING, BT_DFLOAT64, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT128_CONST_STRING, + BT_DFLOAT128, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_1 (BT_FN_UNWINDWORD_PTR, BT_UNWINDWORD, BT_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_WINT, BT_INT, BT_WINT) +DEF_FUNCTION_TYPE_1 (BT_FN_WINT_WINT, BT_WINT, BT_WINT) +DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT32_DFLOAT32, BT_DFLOAT32, BT_DFLOAT32) +DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT64_DFLOAT64, BT_DFLOAT64, BT_DFLOAT64) +DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT128_DFLOAT128, BT_DFLOAT128, BT_DFLOAT128) +DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VPTR, BT_VOID, BT_VOLATILE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_VOID_PTRPTR, BT_VOID, BT_PTR_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_VOID_CONST_PTR, BT_VOID, BT_CONST_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT_UINT, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT_INT, 
BT_UINT, BT_INT) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT_ULONG, BT_UINT, BT_ULONG) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT_LONG, BT_UINT, BT_LONG) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT_PTR, BT_UINT, BT_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT_CONST_PTR, BT_UINT, BT_CONST_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_PTR, BT_ULONG, BT_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_CONST_PTR, BT_ULONG, BT_CONST_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_ULONG, BT_ULONG, BT_ULONG) +DEF_FUNCTION_TYPE_1 (BT_FN_ULONGLONG_ULONGLONG, BT_ULONGLONG, BT_ULONGLONG) +DEF_FUNCTION_TYPE_1 (BT_FN_INT8_FLOAT, BT_INT8, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_INT16_FLOAT, BT_INT16, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_FLOAT, BT_UINT32, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_FLOAT, BT_UINT16, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT8_FLOAT, BT_UINT8, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_UINT16, BT_UINT16, BT_UINT16) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_UINT32, BT_UINT32, BT_UINT32) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT64_UINT64, BT_UINT64, BT_UINT64) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT128_UINT128, BT_UINT128, BT_UINT128) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT64_FLOAT, BT_UINT64, BT_FLOAT) +DEF_FUNCTION_TYPE_1 (BT_FN_BOOL_INT, BT_BOOL, BT_INT) +DEF_FUNCTION_TYPE_1 (BT_FN_BOOL_PTR, BT_BOOL, BT_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_PTR_CONST_PTR, BT_PTR, BT_CONST_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_CONST_PTR_CONST_PTR, BT_CONST_PTR, BT_CONST_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_UINT32, BT_UINT16, BT_UINT32) +DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_UINT16, BT_UINT32, BT_UINT16) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_FENV_T_PTR, BT_INT, BT_FENV_T_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_INT_CONST_FENV_T_PTR, BT_INT, BT_CONST_FENV_T_PTR) + +DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR, BT_FN_VOID_PTR) + +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_INT, BT_VOID, BT_PTR, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING, + BT_STRING, BT_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_CONST_STRING, + BT_INT, BT_CONST_STRING, 
BT_CONST_STRING) +DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_CONST_STRING, + BT_STRING, BT_CONST_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_CONST_STRING, + BT_SIZE, BT_CONST_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_INT, + BT_STRING, BT_CONST_STRING, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_SIZE, + BT_STRING, BT_CONST_STRING, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_SIZE, + BT_SIZE, BT_CONST_STRING, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_FILEPTR, + BT_INT, BT_CONST_STRING, BT_FILEPTR) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_FILEPTR, + BT_INT, BT_INT, BT_FILEPTR) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT16_UINT16_UINT16, + BT_UINT16, BT_UINT16, BT_UINT16) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_PTR_INT, + BT_INT, BT_PTR, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT_PTR_UINT, + BT_UINT, BT_PTR, BT_UINT) +DEF_FUNCTION_TYPE_2 (BT_FN_LONG_PTR_LONG, + BT_LONG, BT_PTR, BT_LONG) +DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_PTR_ULONG, + BT_ULONG, BT_PTR, BT_ULONG) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTRMODE_PTR, + BT_VOID, BT_PTRMODE, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTRMODE, + BT_VOID, BT_PTR, BT_PTRMODE) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT8_UINT8, + BT_VOID, BT_UINT8, BT_UINT8) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT16_UINT16, + BT_VOID, BT_UINT16, BT_UINT16) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT32_UINT32, + BT_VOID, BT_UINT32, BT_UINT32) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT64_UINT64, + BT_VOID, BT_UINT64, BT_UINT64) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_FLOAT_FLOAT, + BT_VOID, BT_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_DOUBLE_DOUBLE, + BT_VOID, BT_DOUBLE, BT_DOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT64_PTR, + BT_VOID, BT_UINT64, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VALIST_REF_VALIST_ARG, + BT_VOID, BT_VALIST_REF, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_2 (BT_FN_LONG_LONG_LONG, + BT_LONG, BT_LONG, BT_LONG) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT8_UINT8_UINT8, + BT_UINT8, BT_UINT8, BT_UINT8) 
+DEF_FUNCTION_TYPE_2 (BT_FN_INT8_INT8_INT8, + BT_INT8, BT_INT8, BT_INT8) +DEF_FUNCTION_TYPE_2 (BT_FN_INT16_INT16_INT16, + BT_INT16, BT_INT16, BT_INT16) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_INT, + BT_INT, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT_FLOAT_UINT, + BT_UINT, BT_FLOAT, BT_UINT) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT_DOUBLE_UINT, + BT_UINT, BT_DOUBLE, BT_UINT) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_UINT_UINT, + BT_FLOAT, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_UINT_UINT, + BT_ULONG, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_UINT_PTR, + BT_ULONG, BT_UINT, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_ULONG_ULONG, + BT_ULONG, BT_ULONG, BT_ULONG) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT_UINT_UINT, + BT_UINT, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_PTR_CONST_STRING, + BT_INT, BT_PTR, BT_CONST_STRING) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_SIZE, + BT_VOID, BT_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT_PTR, + BT_VOID, BT_UINT, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOAT, + BT_FLOAT, BT_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLE, + BT_DOUBLE, BT_DOUBLE, BT_DOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT16_FLOAT16_FLOAT16, + BT_FLOAT16, BT_FLOAT16, BT_FLOAT16) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32_FLOAT32_FLOAT32, + BT_FLOAT32, BT_FLOAT32, BT_FLOAT32) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64_FLOAT64_FLOAT64, + BT_FLOAT64, BT_FLOAT64, BT_FLOAT64) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128_FLOAT128_FLOAT128, + BT_FLOAT128, BT_FLOAT128, BT_FLOAT128) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32X_FLOAT32X_FLOAT32X, + BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64X_FLOAT64X_FLOAT64X, + BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128X_FLOAT128X_FLOAT128X, + BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOATPTR, + BT_FLOAT, BT_FLOAT, BT_FLOAT_PTR) 
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, + BT_DOUBLE, BT_DOUBLE, BT_DOUBLE_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONGDOUBLE, + BT_FLOAT, BT_FLOAT, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, + BT_DOUBLE, BT_DOUBLE, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INT, + BT_FLOAT, BT_FLOAT, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INT, + BT_DOUBLE, BT_DOUBLE, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INT, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INTPTR, + BT_FLOAT, BT_FLOAT, BT_INT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INTPTR, + BT_DOUBLE, BT_DOUBLE, BT_INT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_INT_FLOAT, + BT_FLOAT, BT_INT, BT_FLOAT) +DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_INT_DOUBLE, + BT_DOUBLE, BT_INT, BT_DOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_INT_LONGDOUBLE, + BT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONG, + BT_FLOAT, BT_FLOAT, BT_LONG) +DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONG, + BT_DOUBLE, BT_DOUBLE, BT_LONG) +DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONG) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_VALIST_ARG, + BT_INT, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_2 (BT_FN_PTR_SIZE_SIZE, + BT_PTR, BT_SIZE, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_PTR_PTR_SIZE, + BT_PTR, BT_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, + BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT) +DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, + BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, + 
BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING, + BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING) +DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_PTR_INT, BT_SIZE, BT_CONST_PTR, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_I1_VPTR_I1, BT_I1, BT_VOLATILE_PTR, BT_I1) +DEF_FUNCTION_TYPE_2 (BT_FN_I2_VPTR_I2, BT_I2, BT_VOLATILE_PTR, BT_I2) +DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_I4, BT_I4, BT_VOLATILE_PTR, BT_I4) +DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8) +DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_I16, BT_I16, BT_VOLATILE_PTR, BT_I16) +DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_LONGPTR_LONGPTR, + BT_BOOL, BT_PTR_LONG, BT_PTR_LONG) +DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, + BT_BOOL, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG) +DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR, + BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_I2_CONST_VPTR_INT, BT_I2, BT_CONST_VOLATILE_PTR, + BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_I4_CONST_VPTR_INT, BT_I4, BT_CONST_VOLATILE_PTR, + BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_I8_CONST_VPTR_INT, BT_I8, BT_CONST_VOLATILE_PTR, + BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_I16_CONST_VPTR_INT, BT_I16, BT_CONST_VOLATILE_PTR, + BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_INT, BT_VOID, BT_VOLATILE_PTR, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_VPTR_INT, BT_BOOL, BT_VOLATILE_PTR, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_SIZE_CONST_VPTR, BT_BOOL, BT_SIZE, + BT_CONST_VOLATILE_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_INT_BOOL, BT_BOOL, BT_INT, BT_BOOL) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT_UINT_PTR, BT_UINT, BT_UINT, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT_UINT_CONST_PTR, BT_UINT, BT_UINT, BT_CONST_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_PTR_CONST_PTR_SIZE, BT_PTR, BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_PTR_CONST_PTR_CONST_PTR, BT_PTR, BT_CONST_PTR, BT_CONST_PTR) +DEF_FUNCTION_TYPE_2 
(BT_FN_VOID_PTRPTR_CONST_PTR, BT_VOID, BT_PTR_PTR, BT_CONST_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_CONST_PTR_SIZE, BT_VOID, BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_2 (BT_FN_CONST_PTR_CONST_PTR_CONST_PTR, BT_CONST_PTR, BT_CONST_PTR, BT_CONST_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_UINT32_UINT64_PTR, + BT_UINT32, BT_UINT64, BT_PTR) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_FEXCEPT_T_PTR_INT, BT_INT, BT_FEXCEPT_T_PTR, + BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_FEXCEPT_T_PTR_INT, BT_INT, + BT_CONST_FEXCEPT_T_PTR, BT_INT) +DEF_FUNCTION_TYPE_2 (BT_FN_PTR_CONST_PTR_UINT8, BT_PTR, BT_CONST_PTR, BT_UINT8) + +DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR_PTR, BT_FN_VOID_PTR_PTR) + +DEF_FUNCTION_TYPE_3 (BT_FN_STRING_STRING_CONST_STRING_SIZE, + BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, + BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_CONST_PTR_SIZE, + BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_CONST_PTR_SIZE, + BT_VOID, BT_PTR, BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, + BT_INT, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_INT_SIZE, + BT_PTR, BT_PTR, BT_INT, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_SIZE, + BT_VOID, BT_PTR, BT_INT, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_INT, + BT_VOID, BT_PTR, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_CONST_PTR_PTR_SIZE, + BT_VOID, BT_CONST_PTR, BT_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, + BT_INT, BT_STRING, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, + BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, + BT_INT, BT_FILEPTR, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_INT_UINT_UINT, + BT_INT, BT_INT, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_3 
(BT_FN_UINT_UINT_UINT_UINT, + BT_UINT, BT_UINT, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_3 (BT_FN_UINT_UINT_UINT_PTR, + BT_UINT, BT_UINT, BT_UINT, BT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_UINT_ULONG_ULONG_UINT, + BT_UINT, BT_ULONG, BT_ULONG, BT_UINT) +DEF_FUNCTION_TYPE_3 (BT_FN_ULONG_ULONG_ULONG_ULONG, + BT_ULONG, BT_ULONG, BT_ULONG, BT_ULONG) +DEF_FUNCTION_TYPE_3 (BT_FN_LONG_LONG_UINT_UINT, + BT_LONG, BT_LONG, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_3 (BT_FN_LONG_LONG_LONG_DOUBLE, + BT_LONG, BT_LONG, BT_LONG, BT_DOUBLE) +DEF_FUNCTION_TYPE_3 (BT_FN_ULONG_ULONG_UINT_UINT, + BT_ULONG, BT_ULONG, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_3 (BT_FN_STRING_CONST_STRING_CONST_STRING_INT, + BT_STRING, BT_CONST_STRING, BT_CONST_STRING, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, + BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, + BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE) +DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT16_FLOAT16_FLOAT16_FLOAT16, + BT_FLOAT16, BT_FLOAT16, BT_FLOAT16, BT_FLOAT16) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT32_FLOAT32_FLOAT32_FLOAT32, + BT_FLOAT32, BT_FLOAT32, BT_FLOAT32, BT_FLOAT32) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT64_FLOAT64_FLOAT64_FLOAT64, + BT_FLOAT64, BT_FLOAT64, BT_FLOAT64, BT_FLOAT64) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT128_FLOAT128_FLOAT128_FLOAT128, + BT_FLOAT128, BT_FLOAT128, BT_FLOAT128, BT_FLOAT128) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT32X_FLOAT32X_FLOAT32X_FLOAT32X, + BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT64X_FLOAT64X_FLOAT64X_FLOAT64X, + BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT128X_FLOAT128X_FLOAT128X_FLOAT128X, + BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X) +DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, + BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_INT_PTR) +DEF_FUNCTION_TYPE_3 
(BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, + BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_INT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, + BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, + BT_VOID, BT_FLOAT, BT_FLOAT_PTR, BT_FLOAT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, + BT_VOID, BT_DOUBLE, BT_DOUBLE_PTR, BT_DOUBLE_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, + BT_VOID, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR, BT_LONGDOUBLE_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_PTR, BT_VOID, BT_PTR, BT_PTR, BT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_UINT32, BT_VOID, BT_PTR, BT_PTR, BT_UINT32) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, + BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING, BT_PTR_CONST_STRING) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_INT_CONST_STRING_VALIST_ARG, + BT_INT, BT_INT, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I1_I1, BT_BOOL, BT_VOLATILE_PTR, + BT_I1, BT_I1) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I2_I2, BT_BOOL, BT_VOLATILE_PTR, + BT_I2, BT_I2) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I4_I4, BT_BOOL, BT_VOLATILE_PTR, + BT_I4, BT_I4) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I8_I8, BT_BOOL, BT_VOLATILE_PTR, + BT_I8, BT_I8) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I16_I16, BT_BOOL, BT_VOLATILE_PTR, + BT_I16, BT_I16) +DEF_FUNCTION_TYPE_3 (BT_FN_I1_VPTR_I1_I1, BT_I1, BT_VOLATILE_PTR, BT_I1, BT_I1) +DEF_FUNCTION_TYPE_3 (BT_FN_I2_VPTR_I2_I2, BT_I2, BT_VOLATILE_PTR, BT_I2, BT_I2) +DEF_FUNCTION_TYPE_3 (BT_FN_I4_VPTR_I4_I4, BT_I4, BT_VOLATILE_PTR, BT_I4, BT_I4) +DEF_FUNCTION_TYPE_3 (BT_FN_I8_VPTR_I8_I8, BT_I8, BT_VOLATILE_PTR, BT_I8, BT_I8) +DEF_FUNCTION_TYPE_3 (BT_FN_I16_VPTR_I16_I16, BT_I16, BT_VOLATILE_PTR, + BT_I16, BT_I16) +DEF_FUNCTION_TYPE_3 (BT_FN_PTR_CONST_PTR_INT_SIZE, BT_PTR, + BT_CONST_PTR, BT_INT, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_I1_VPTR_I1_INT, BT_I1, 
BT_VOLATILE_PTR, BT_I1, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_I2_VPTR_I2_INT, BT_I2, BT_VOLATILE_PTR, BT_I2, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_I4_VPTR_I4_INT, BT_I4, BT_VOLATILE_PTR, BT_I4, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_I8_VPTR_I8_INT, BT_I8, BT_VOLATILE_PTR, BT_I8, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_I16_VPTR_I16_INT, BT_I16, BT_VOLATILE_PTR, BT_I16, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I1_INT, BT_VOID, BT_VOLATILE_PTR, BT_I1, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I2_INT, BT_VOID, BT_VOLATILE_PTR, BT_I2, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I4_INT, BT_VOID, BT_VOLATILE_PTR, BT_I4, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I8_INT, BT_VOID, BT_VOLATILE_PTR, BT_I8, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I16_INT, BT_VOID, BT_VOLATILE_PTR, BT_I16, BT_INT) +DEF_FUNCTION_TYPE_3 (BT_FN_INT_PTRPTR_SIZE_SIZE, BT_INT, BT_PTR_PTR, BT_SIZE, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_PTR_CONST_PTR_CONST_PTR_SIZE, BT_PTR, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_INT_INT_INTPTR, BT_BOOL, BT_INT, BT_INT, + BT_INT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_LONG_LONG_LONGPTR, BT_BOOL, BT_LONG, BT_LONG, + BT_PTR_LONG) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, BT_BOOL, + BT_LONGLONG, BT_LONGLONG, BT_PTR_LONGLONG) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_UINT_UINT_UINTPTR, BT_BOOL, BT_UINT, BT_UINT, + BT_PTR_UINT) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_ULONG_ULONG_ULONGPTR, BT_BOOL, BT_ULONG, + BT_ULONG, BT_PTR_ULONG) +DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, BT_BOOL, + BT_ULONGLONG, BT_ULONGLONG, BT_PTR_ULONGLONG) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_UINT32_UINT64_PTR, + BT_VOID, BT_UINT32, BT_UINT64, BT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_UINT32_UINT32_PTR, + BT_VOID, BT_UINT32, BT_UINT32, BT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_SIZE_SIZE_PTR, BT_VOID, BT_SIZE, BT_SIZE, + BT_PTR) +DEF_FUNCTION_TYPE_3 (BT_FN_UINT_UINT_PTR_PTR, BT_UINT, BT_UINT, BT_PTR, BT_PTR) +DEF_FUNCTION_TYPE_3 
(BT_FN_PTR_PTR_CONST_SIZE_BOOL, + BT_PTR, BT_PTR, BT_CONST_SIZE, BT_BOOL) +DEF_FUNCTION_TYPE_3 (BT_FN_PTR_SIZE_SIZE_PTRMODE, + BT_PTR, BT_SIZE, BT_SIZE, BT_PTRMODE) +DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_UINT8_PTRMODE, BT_VOID, BT_PTR, BT_UINT8, + BT_PTRMODE) + +DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, + BT_SIZE, BT_CONST_PTR, BT_SIZE, BT_SIZE, BT_FILEPTR) +DEF_FUNCTION_TYPE_4 (BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, + BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_TM_PTR, + BT_SIZE, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_CONST_TM_PTR) +DEF_FUNCTION_TYPE_4 (BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, + BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE, BT_SIZE) +DEF_FUNCTION_TYPE_4 (BT_FN_PTR_PTR_INT_SIZE_SIZE, + BT_PTR, BT_PTR, BT_INT, BT_SIZE, BT_SIZE) +DEF_FUNCTION_TYPE_4 (BT_FN_UINT_UINT_UINT_UINT_UINT, + BT_UINT, BT_UINT, BT_UINT, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_4 (BT_FN_UINT_FLOAT_FLOAT_FLOAT_FLOAT, + BT_UINT, BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT) +DEF_FUNCTION_TYPE_4 (BT_FN_ULONG_ULONG_ULONG_UINT_UINT, + BT_ULONG, BT_ULONG, BT_ULONG, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_4 (BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, + BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE, BT_SIZE) +DEF_FUNCTION_TYPE_4 (BT_FN_INT_FILEPTR_INT_CONST_STRING_VALIST_ARG, + BT_INT, BT_FILEPTR, BT_INT, BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_4 (BT_FN_VOID_OMPFN_PTR_UINT_UINT, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_4 (BT_FN_UINT_OMPFN_PTR_UINT_UINT, + BT_UINT, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, BT_UINT) +DEF_FUNCTION_TYPE_4 (BT_FN_VOID_PTR_WORD_WORD_PTR, + BT_VOID, BT_PTR, BT_WORD, BT_WORD, BT_PTR) +DEF_FUNCTION_TYPE_4 (BT_FN_VOID_SIZE_VPTR_PTR_INT, BT_VOID, BT_SIZE, + BT_VOLATILE_PTR, BT_PTR, BT_INT) +DEF_FUNCTION_TYPE_4 (BT_FN_VOID_SIZE_CONST_VPTR_PTR_INT, BT_VOID, BT_SIZE, + BT_CONST_VOLATILE_PTR, BT_PTR, BT_INT) +DEF_FUNCTION_TYPE_4 
(BT_FN_BOOL_UINT_LONGPTR_LONGPTR_LONGPTR, + BT_BOOL, BT_UINT, BT_PTR_LONG, BT_PTR_LONG, BT_PTR_LONG) +DEF_FUNCTION_TYPE_4 (BT_FN_BOOL_UINT_ULLPTR_ULLPTR_ULLPTR, + BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG, + BT_PTR_ULONGLONG) +DEF_FUNCTION_TYPE_4 (BT_FN_VOID_UINT_PTR_INT_PTR, BT_VOID, BT_INT, BT_PTR, + BT_INT, BT_PTR) +DEF_FUNCTION_TYPE_4 (BT_FN_BOOL_UINT_UINT_UINT_BOOL, + BT_BOOL, BT_UINT, BT_UINT, BT_UINT, BT_BOOL) + +DEF_FUNCTION_TYPE_5 (BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VALIST_ARG, + BT_INT, BT_STRING, BT_INT, BT_SIZE, BT_CONST_STRING, + BT_VALIST_ARG) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_LONG_LONG_LONG_LONGPTR_LONGPTR, + BT_BOOL, BT_LONG, BT_LONG, BT_LONG, + BT_PTR_LONG, BT_PTR_LONG) +DEF_FUNCTION_TYPE_5 (BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, BT_VOID, BT_SIZE, + BT_VOLATILE_PTR, BT_PTR, BT_PTR, BT_INT) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I1_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I1, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I2_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I2, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I4_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I4, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I8_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I8, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I16_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I16, BT_INT, BT_INT) +DEF_FUNCTION_TYPE_5 (BT_FN_VOID_INT_SIZE_PTR_PTR_PTR, + BT_VOID, BT_INT, BT_SIZE, BT_PTR, BT_PTR, BT_PTR) +DEF_FUNCTION_TYPE_5 (BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, BT_UINT, + BT_UINT) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR, + BT_BOOL, BT_UINT, BT_PTR_LONG, BT_LONG, BT_PTR_LONG, + BT_PTR_LONG) +DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR, + BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_ULONGLONG, + BT_PTR_ULONGLONG, BT_PTR_ULONGLONG) + +DEF_FUNCTION_TYPE_6 (BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VALIST_ARG, 
+ BT_INT, BT_STRING, BT_SIZE, BT_INT, BT_SIZE, + BT_CONST_STRING, BT_VALIST_ARG) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR, + BT_BOOL, BT_LONG, BT_LONG, BT_LONG, BT_LONG, + BT_PTR_LONG, BT_PTR_LONG) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR, + BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG, + BT_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I1, BT_BOOL, BT_INT, + BT_INT) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I2, BT_BOOL, BT_INT, + BT_INT) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I4, BT_BOOL, BT_INT, + BT_INT) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I8, BT_BOOL, BT_INT, + BT_INT) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT, + BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I16, BT_BOOL, BT_INT, + BT_INT) +DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_SIZE_VPTR_PTR_PTR_INT_INT, BT_BOOL, BT_SIZE, + BT_VOLATILE_PTR, BT_PTR, BT_PTR, BT_INT, BT_INT) + +DEF_FUNCTION_TYPE_7 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, + BT_LONG, BT_LONG, BT_LONG, BT_UINT) +DEF_FUNCTION_TYPE_7 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR, + BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG, + BT_ULONGLONG, BT_ULONGLONG, + BT_PTR_ULONGLONG, BT_PTR_ULONGLONG) +DEF_FUNCTION_TYPE_7 (BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_UINT_PTR, + BT_VOID, BT_INT, BT_SIZE, BT_PTR, BT_PTR, BT_PTR, BT_UINT, + BT_PTR) + +DEF_FUNCTION_TYPE_8 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, + BT_LONG, BT_LONG, BT_LONG, BT_LONG, BT_UINT) +DEF_FUNCTION_TYPE_8 (BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR, + BT_BOOL, BT_UINT, BT_PTR_LONG, BT_LONG, BT_LONG, + BT_PTR_LONG, BT_PTR_LONG, BT_PTR, BT_PTR) 
+DEF_FUNCTION_TYPE_8 (BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR, + BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_LONG, BT_ULONGLONG, + BT_PTR_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR, BT_PTR) + +DEF_FUNCTION_TYPE_9 (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR, + BT_VOID, BT_INT, BT_PTR_FN_VOID_PTR, BT_SIZE, BT_PTR, + BT_PTR, BT_PTR, BT_UINT, BT_PTR, BT_PTR) +DEF_FUNCTION_TYPE_9 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR, + BT_BOOL, BT_LONG, BT_LONG, BT_LONG, BT_LONG, BT_LONG, + BT_PTR_LONG, BT_PTR_LONG, BT_PTR, BT_PTR) + +DEF_FUNCTION_TYPE_10 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT_PTR, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, + BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG, + BT_BOOL, BT_UINT, BT_PTR, BT_INT, BT_PTR) +DEF_FUNCTION_TYPE_10 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR, + BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG, + BT_ULONGLONG, BT_LONG, BT_ULONGLONG, BT_PTR_ULONGLONG, + BT_PTR_ULONGLONG, BT_PTR, BT_PTR) + +DEF_FUNCTION_TYPE_11 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_LONG_LONG_LONG, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, + BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG, + BT_UINT, BT_LONG, BT_INT, BT_LONG, BT_LONG, BT_LONG) +DEF_FUNCTION_TYPE_11 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_ULL_ULL_ULL, + BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, + BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG, + BT_UINT, BT_LONG, BT_INT, + BT_ULONGLONG, BT_ULONGLONG, BT_ULONGLONG) + +DEF_FUNCTION_TYPE_VAR_0 (BT_FN_VOID_VAR, BT_VOID) +DEF_FUNCTION_TYPE_VAR_0 (BT_FN_INT_VAR, BT_INT) +DEF_FUNCTION_TYPE_VAR_0 (BT_FN_PTR_VAR, BT_PTR) +DEF_FUNCTION_TYPE_VAR_0 (BT_FN_BOOL_VAR, BT_BOOL) + +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_VALIST_REF_VAR, + BT_VOID, BT_VALIST_REF) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_CONST_PTR_VAR, + BT_VOID, BT_CONST_PTR) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_INT_CONST_STRING_VAR, + BT_INT, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_UINT32_UINT32_VAR, + BT_UINT32, BT_UINT32) 
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_LONG_VAR, + BT_VOID, BT_LONG) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_ULL_VAR, + BT_VOID, BT_ULONGLONG) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_PTR_PTR_VAR, BT_PTR, BT_PTR) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I1_I1_VAR, BT_I1, BT_I1) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I2_I2_VAR, BT_I2, BT_I2) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I4_I4_VAR, BT_I4, BT_I4) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I8_I8_VAR, BT_I8, BT_I8) +DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I16_I16_VAR, BT_I16, BT_I16) + +DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_FILEPTR_CONST_STRING_VAR, + BT_INT, BT_FILEPTR, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_STRING_CONST_STRING_VAR, + BT_INT, BT_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_CONST_STRING_CONST_STRING_VAR, + BT_INT, BT_CONST_STRING, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_INT_CONST_STRING_VAR, + BT_INT, BT_INT, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_2 (BT_FN_PTR_CONST_PTR_SIZE_VAR, BT_PTR, + BT_CONST_PTR, BT_SIZE) +DEF_FUNCTION_TYPE_VAR_2 (BT_FN_VOID_INT_INT_VAR, BT_VOID, + BT_INT, BT_INT) + +DEF_FUNCTION_TYPE_VAR_3 (BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, + BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_3 (BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, + BT_SSIZE, BT_STRING, BT_SIZE, BT_CONST_STRING) +DEF_FUNCTION_TYPE_VAR_3 (BT_FN_INT_FILEPTR_INT_CONST_STRING_VAR, + BT_INT, BT_FILEPTR, BT_INT, BT_CONST_STRING) + +DEF_FUNCTION_TYPE_VAR_4 (BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VAR, + BT_INT, BT_STRING, BT_INT, BT_SIZE, BT_CONST_STRING) + +DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VAR, + BT_INT, BT_STRING, BT_SIZE, BT_INT, BT_SIZE, + BT_CONST_STRING) + +DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_INT_INT_INT_INT_INT_VAR, + BT_INT, BT_INT, BT_INT, BT_INT, BT_INT, BT_INT) + +DEF_FUNCTION_TYPE_VAR_6 (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_VAR, + BT_VOID, BT_INT, BT_PTR_FN_VOID_PTR, BT_SIZE, + BT_PTR, BT_PTR, BT_PTR) + +DEF_FUNCTION_TYPE_VAR_7 
(BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_INT_INT_VAR, + BT_VOID, BT_INT, BT_SIZE, BT_PTR, BT_PTR, + BT_PTR, BT_INT, BT_INT) + +DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR) +DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, + BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE) + + +DEF_FUNCTION_TYPE_1 (BT_FN_I1_VPTR, BT_I1, BT_VOLATILE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_I2_VPTR, BT_I2, BT_VOLATILE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_I4_VPTR, BT_I4, BT_VOLATILE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_I8_VPTR, BT_I8, BT_VOLATILE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_VPTR, BT_FLOAT, BT_VOLATILE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_CONST_DOUBLE_PTR, BT_DOUBLE, BT_DOUBLE_PTR) +DEF_FUNCTION_TYPE_1 (BT_FN_LDOUBLE_VPTR, BT_LONGDOUBLE, BT_VOLATILE_PTR) + +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I1, BT_VOID, BT_VOLATILE_PTR, BT_I1) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I2, BT_VOID, BT_VOLATILE_PTR, BT_I2) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I4, BT_VOID, BT_VOLATILE_PTR, BT_I4) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I8, BT_VOID, BT_VOLATILE_PTR, BT_I8) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_FLOAT, BT_VOID, BT_VOLATILE_PTR, BT_FLOAT) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_DOUBLE, BT_VOID, + BT_VOLATILE_PTR, BT_DOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_LDOUBLE, BT_VOID, + BT_VOLATILE_PTR, BT_LONGDOUBLE) +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_SIZE, BT_VOID, + BT_VOLATILE_PTR, BT_SIZE) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtins.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtins.def new file mode 100644 index 0000000..005976f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtins.def @@ -0,0 +1,1073 @@ +/* This file contains the definitions and documentation for the + builtins used in the GNU compiler. + Copyright (C) 2000-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Before including this file, you should define a macro: + + DEF_BUILTIN (ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P, + FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT, COND) + + This macro will be called once for each builtin function. The + ENUM will be of type `enum built_in_function', and will indicate + which builtin function is being processed. The NAME of the builtin + function (which will always start with `__builtin_') is a string + literal. The CLASS is of type `enum built_in_class' and indicates + what kind of builtin is being processed. + + Some builtins are actually two separate functions. For example, + for `strcmp' there are two builtin functions; `__builtin_strcmp' + and `strcmp' itself. Both behave identically. Other builtins + define only the `__builtin' variant. If BOTH_P is TRUE, then this + builtin has both variants; otherwise, it is has only the first + variant. + + TYPE indicates the type of the function. The symbols correspond to + enumerals from builtin-types.def. If BOTH_P is true, then LIBTYPE + is the type of the non-`__builtin_' variant. Otherwise, LIBTYPE + should be ignored. + + If FALLBACK_P is true then, if for some reason, the compiler cannot + expand the builtin function directly, it will call the + corresponding library function (which does not have the + `__builtin_' prefix. 
+ + If NONANSI_P is true, then the non-`__builtin_' variant is not an + ANSI/ISO library function, and so we should pretend it does not + exist when compiling in ANSI conformant mode. + + ATTRs is an attribute list as defined in builtin-attrs.def that + describes the attributes of this builtin function. + + IMPLICIT specifies condition when the builtin can be produced by + compiler. For instance C90 reserves floorf function, but does not + define it's meaning. When user uses floorf we may assume that the + floorf has the meaning we expect, but we can't produce floorf by + simplifying floor((double)float) since the runtime need not implement + it. + + The builtins is registered only if COND is true. */ + +/* A GCC builtin (like __builtin_saveregs) is provided by the + compiler, but does not correspond to a function in the standard + library. */ +#undef DEF_GCC_BUILTIN +#define DEF_GCC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \ + false, false, false, ATTRS, true, true) + +/* Like DEF_GCC_BUILTIN, except we don't prepend "__builtin_". */ +#undef DEF_SYNC_BUILTIN +#define DEF_SYNC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \ + false, false, false, ATTRS, true, true) + +/* A set of GCC builtins for _FloatN and _FloatNx types. TYPE_MACRO + is called with an argument such as FLOAT32 to produce the enum + value for the type. 
*/ +#undef DEF_GCC_FLOATN_NX_BUILTINS +#define DEF_GCC_FLOATN_NX_BUILTINS(ENUM, NAME, TYPE_MACRO, ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F16, NAME "f16", TYPE_MACRO (FLOAT16), ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F32, NAME "f32", TYPE_MACRO (FLOAT32), ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F64, NAME "f64", TYPE_MACRO (FLOAT64), ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F128, NAME "f128", TYPE_MACRO (FLOAT128), ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F32X, NAME "f32x", TYPE_MACRO (FLOAT32X), ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F64X, NAME "f64x", TYPE_MACRO (FLOAT64X), ATTRS) \ + DEF_GCC_BUILTIN (ENUM ## F128X, NAME "f128x", TYPE_MACRO (FLOAT128X), ATTRS) + +/* A library builtin (like __builtin_strchr) is a builtin equivalent + of an ANSI/ISO standard library function. In addition to the + `__builtin' version, we will create an ordinary version (e.g, + `strchr') as well. If we cannot compute the answer using the + builtin function, we will fall back to the standard library + version. */ +#undef DEF_LIB_BUILTIN +#define DEF_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, false, ATTRS, true, true) + +/* Like DEF_LIB_BUILTIN, except that the function is not one that is + specified by ANSI/ISO C. So, when we're being fully conformant we + ignore the version of these builtins that does not begin with + __builtin. */ +#undef DEF_EXT_LIB_BUILTIN +#define DEF_EXT_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, true, ATTRS, false, true) + +/* A set of GCC builtins for _FloatN and _FloatNx types. TYPE_MACRO is called + with an argument such as FLOAT32 to produce the enum value for the type. If + we are compiling for the C language with GNU extensions, we enable the name + without the __builtin_ prefix as well as the name with the __builtin_ + prefix. 
C++ does not enable these names by default because they don't have + the _Float and _FloatX keywords, and a class based library should use + the __builtin_ names. */ +#undef DEF_FLOATN_BUILTIN +#define DEF_FLOATN_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + targetm.floatn_builtin_p ((int) ENUM), true, true, ATTRS, \ + false, true) +#undef DEF_EXT_LIB_FLOATN_NX_BUILTINS +#define DEF_EXT_LIB_FLOATN_NX_BUILTINS(ENUM, NAME, TYPE_MACRO, ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F16, NAME "f16", TYPE_MACRO (FLOAT16), ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F32, NAME "f32", TYPE_MACRO (FLOAT32), ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F64, NAME "f64", TYPE_MACRO (FLOAT64), ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F128, NAME "f128", TYPE_MACRO (FLOAT128), ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F32X, NAME "f32x", TYPE_MACRO (FLOAT32X), ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F64X, NAME "f64x", TYPE_MACRO (FLOAT64X), ATTRS) \ + DEF_FLOATN_BUILTIN (ENUM ## F128X, NAME "f128x", TYPE_MACRO (FLOAT128X), \ + ATTRS) + +/* Like DEF_LIB_BUILTIN, except that the function is only a part of + the standard in C94 or above. */ +#undef DEF_C94_BUILTIN +#define DEF_C94_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, !flag_isoc94, ATTRS, \ + targetm.libc_has_function (function_c94, NULL_TREE), true) + +/* Like DEF_LIB_BUILTIN, except that the function is only a part of + the standard in C99 or above. */ +#undef DEF_C99_BUILTIN +#define DEF_C99_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, !flag_isoc99, ATTRS, \ + targetm.libc_has_function (function_c99_misc, NULL_TREE), true) + +/* Like DEF_LIB_BUILTIN, except that the function is only a part of + the standard in C11 or above. 
*/ +#undef DEF_C11_BUILTIN +#define DEF_C11_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, !flag_isoc11, ATTRS, \ + targetm.libc_has_function (function_c11_misc, NULL_TREE), true) + +/* Like DEF_LIB_BUILTIN, except that the function is only a part of + the standard in C2x or above. */ +#undef DEF_C2X_BUILTIN +#define DEF_C2X_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, !flag_isoc2x, ATTRS, \ + targetm.libc_has_function (function_c2x_misc, NULL_TREE), true) + +/* Like DEF_C99_BUILTIN, but for complex math functions. */ +#undef DEF_C99_COMPL_BUILTIN +#define DEF_C99_COMPL_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, !flag_isoc99, ATTRS, \ + targetm.libc_has_function (function_c99_math_complex, \ + NULL_TREE), \ + true) + +/* Builtin that is specified by C99 and C90 reserve the name for future use. + We can still recognize the builtin in C90 mode but we can't produce it + implicitly. */ +#undef DEF_C99_C90RES_BUILTIN +#define DEF_C99_C90RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, !flag_isoc99, ATTRS, \ + targetm.libc_has_function (function_c99_misc, NULL_TREE), true) + +/* Builtin that C99 reserve the name for future use. We can still recognize + the builtin in C99 mode but we can't produce it implicitly. */ +#undef DEF_EXT_C99RES_BUILTIN +#define DEF_EXT_C99RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, true, ATTRS, false, true) + +/* Allocate the enum and the name for a builtin, but do not actually + define it here at all. 
*/ +#undef DEF_BUILTIN_STUB +#define DEF_BUILTIN_STUB(ENUM, NAME) \ + DEF_BUILTIN (ENUM, NAME, BUILT_IN_NORMAL, BT_LAST, BT_LAST, false, false, \ + false, ATTR_LAST, false, false) + +/* Builtins used in implementing coroutine support. */ +#undef DEF_COROUTINE_BUILTIN +#define DEF_COROUTINE_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_coro_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, true, ATTRS, true, flag_coroutines) + +/* Builtin used by the implementation of OpenACC and OpenMP. Few of these are + actually implemented in the compiler; most are in libgomp. */ +#undef DEF_GOACC_BUILTIN +#define DEF_GOACC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + false, true, true, ATTRS, false, \ + flag_openacc) +#undef DEF_GOACC_BUILTIN_COMPILER +#define DEF_GOACC_BUILTIN_COMPILER(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + flag_openacc, true, true, ATTRS, false, true) +#undef DEF_GOACC_BUILTIN_ONLY +#define DEF_GOACC_BUILTIN_ONLY(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \ + false, false, true, ATTRS, false, flag_openacc) +#undef DEF_GOMP_BUILTIN +#define DEF_GOMP_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + false, true, true, ATTRS, false, \ + (flag_openacc \ + || flag_openmp \ + || flag_tree_parallelize_loops > 1)) + +/* Builtin used by the implementation of GNU TM. These + functions are mapped to the actual implementation of the STM library. */ +#undef DEF_TM_BUILTIN +#define DEF_TM_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \ + false, true, true, ATTRS, false, flag_tm) + +/* Builtin used by the implementation of libsanitizer. These + functions are mapped to the actual implementation of the + libtsan library. 
*/ +#undef DEF_SANITIZER_BUILTIN +#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ + DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ + true, true, true, ATTRS, true, \ + (flag_sanitize & (SANITIZE_ADDRESS | SANITIZE_THREAD \ + | SANITIZE_HWADDRESS \ + | SANITIZE_UNDEFINED \ + | SANITIZE_UNDEFINED_NONDEFAULT) \ + || flag_sanitize_coverage)) + +/* Define an attribute list for math functions that are normally + "impure" because some of them may write into global memory for + `errno'. If !flag_errno_math they are instead "const". */ +#undef ATTR_MATHFN_ERRNO +#define ATTR_MATHFN_ERRNO (flag_errno_math ? \ + ATTR_ERRNOCONST_NOTHROW_LEAF_LIST : ATTR_CONST_NOTHROW_LEAF_LIST) + +/* Define an attribute list for math functions that are normally + "const" but if flag_rounding_math is set they are instead "pure". + This distinction accounts for the fact that some math functions + check the rounding mode which is akin to examining global + memory. */ +#undef ATTR_MATHFN_FPROUNDING +#define ATTR_MATHFN_FPROUNDING (flag_rounding_math ? \ + ATTR_PURE_NOTHROW_LEAF_LIST : ATTR_CONST_NOTHROW_LEAF_LIST) + +/* Define an attribute list for math functions that are normally + "impure" because some of them may write into global memory for + `errno'. If !flag_errno_math, we can possibly use "pure" or + "const" depending on whether we care about FP rounding. */ +#undef ATTR_MATHFN_FPROUNDING_ERRNO +#define ATTR_MATHFN_FPROUNDING_ERRNO (flag_errno_math ? \ + (flag_rounding_math ? ATTR_ERRNOPURE_NOTHROW_LEAF_LIST \ + : ATTR_ERRNOCONST_NOTHROW_LEAF_LIST) : ATTR_MATHFN_FPROUNDING) + +/* Define an attribute list for math functions that need to mind FP + rounding, but because they store into memory they are never "const" + or "pure". Use of this macro is mainly for documentation and + maintenance purposes. 
*/ +#undef ATTR_MATHFN_FPROUNDING_STORE +#define ATTR_MATHFN_FPROUNDING_STORE ATTR_NOTHROW_LEAF_LIST + +/* Define an attribute list for leaf functions that do not throw + exceptions normally, but may throw exceptions when using + -fnon-call-exceptions. */ +#define ATTR_NOTHROWCALL_LEAF_LIST (flag_non_call_exceptions ? \ + ATTR_LEAF_LIST : ATTR_NOTHROW_LEAF_LIST) + +/* Make sure 0 is not a legitimate builtin. */ +DEF_BUILTIN_STUB(BUILT_IN_NONE, (const char *)0) + +/* Category: math builtins. */ +DEF_LIB_BUILTIN (BUILT_IN_ACOS, "acos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSF, "acosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ACOSH, "acosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ACOSHF, "acoshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ACOSHL, "acoshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSL, "acosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C11_BUILTIN (BUILT_IN_ALIGNED_ALLOC, "aligned_alloc", BT_FN_PTR_SIZE_SIZE, ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ASIN, "asin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINF, "asinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ASINH, "asinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_ASINHF, "asinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_ASINHL, "asinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINL, "asinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_ATAN, "atan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_ATAN2, "atan2", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) 
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2F, "atan2f", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2L, "atan2l", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANF, "atanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_ATANH, "atanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ATANHF, "atanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ATANHL, "atanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANL, "atanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_CBRT, "cbrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_CBRTF, "cbrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_CBRTL, "cbrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_CEIL, "ceil", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILF, "ceilf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILL, "ceill", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define CEIL_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CEIL, "ceil", CEIL_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef CEIL_TYPE +DEF_C99_BUILTIN (BUILT_IN_COPYSIGN, "copysign", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_COPYSIGNF, "copysignf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_COPYSIGNL, "copysignl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define COPYSIGN_TYPE(F) BT_FN_##F##_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_COPYSIGN, "copysign", COPYSIGN_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef COPYSIGN_TYPE +DEF_LIB_BUILTIN (BUILT_IN_COS, "cos", 
BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_COSF, "cosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_COSH, "cosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHF, "coshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHL, "coshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_COSL, "cosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_EXT_LIB_BUILTIN (BUILT_IN_DREM, "drem", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_DREMF, "dremf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_DREML, "dreml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ERF, "erf", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_ERFC, "erfc", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ERFCF, "erfcf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ERFCL, "erfcl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ERFF, "erff", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_ERFL, "erfl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_EXP, "exp", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C2X_BUILTIN (BUILT_IN_EXP10, "exp10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C2X_BUILTIN (BUILT_IN_EXP10F, "exp10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C2X_BUILTIN (BUILT_IN_EXP10L, "exp10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_EXP2, "exp2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_EXP2F, "exp2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN 
(BUILT_IN_EXP2L, "exp2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPF, "expf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPL, "expl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_EXPM1, "expm1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_EXPM1F, "expm1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_EXPM1L, "expm1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_FABS, "fabs", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSF, "fabsf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSL, "fabsl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define FABS_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FABS, "fabs", FABS_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef FABS_TYPE +DEF_C2X_BUILTIN (BUILT_IN_FABSD32, "fabsd32", BT_FN_DFLOAT32_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C2X_BUILTIN (BUILT_IN_FABSD64, "fabsd64", BT_FN_DFLOAT64_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C2X_BUILTIN (BUILT_IN_FABSD128, "fabsd128", BT_FN_DFLOAT128_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FDIM, "fdim", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_FDIMF, "fdimf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_FDIML, "fdiml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_FECLEAREXCEPT, "feclearexcept", BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FEGETENV, "fegetenv", BT_FN_INT_FENV_T_PTR, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FEGETEXCEPTFLAG, "fegetexceptflag", BT_FN_INT_FEXCEPT_T_PTR_INT, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN 
(BUILT_IN_FEGETROUND, "fegetround", BT_FN_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FEHOLDEXCEPT, "feholdexcept", BT_FN_INT_FENV_T_PTR, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FERAISEEXCEPT, "feraiseexcept", BT_FN_INT_INT, ATTR_NULL) +DEF_C99_BUILTIN (BUILT_IN_FESETENV, "fesetenv", BT_FN_INT_CONST_FENV_T_PTR, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FESETEXCEPTFLAG, "fesetexceptflag", BT_FN_INT_CONST_FEXCEPT_T_PTR_INT, ATTR_NULL) +DEF_C99_BUILTIN (BUILT_IN_FESETROUND, "fesetround", BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FETESTEXCEPT, "fetestexcept", BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FEUPDATEENV, "feupdateenv", BT_FN_INT_CONST_FENV_T_PTR, ATTR_NULL) +DEF_LIB_BUILTIN (BUILT_IN_FLOOR, "floor", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORF, "floorf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORL, "floorl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define FLOOR_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FLOOR, "floor", FLOOR_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef FLOOR_TYPE +DEF_C99_BUILTIN (BUILT_IN_FMA, "fma", BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_FMAF, "fmaf", BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_FMAL, "fmal", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +#define FMA_TYPE(F) BT_FN_##F##_##F##_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMA, "fma", FMA_TYPE, ATTR_MATHFN_FPROUNDING) +#undef FMA_TYPE +DEF_C99_BUILTIN (BUILT_IN_FMAX, "fmax", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FMAXF, "fmaxf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FMAXL, "fmaxl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) 
+#define FMAX_TYPE(F) BT_FN_##F##_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMAX, "fmax", FMAX_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef FMAX_TYPE +DEF_C99_BUILTIN (BUILT_IN_FMIN, "fmin", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FMINF, "fminf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_FMINL, "fminl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define FMIN_TYPE(F) BT_FN_##F##_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMIN, "fmin", FMIN_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef FMIN_TYPE +DEF_LIB_BUILTIN (BUILT_IN_FMOD, "fmod", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODF, "fmodf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODL, "fmodl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_FREXP, "frexp", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPF, "frexpf", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPL, "frexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA, "gamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF, "gammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL, "gammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA_R, "gamma_r", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF_R, "gammaf_r", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL_R, "gammal_r", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_GCC_BUILTIN (BUILT_IN_HUGE_VAL, "huge_val", BT_FN_DOUBLE, 
ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALF, "huge_valf", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALL, "huge_vall", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define INF_TYPE(F) BT_FN_##F +DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_HUGE_VAL, "huge_val", INF_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_HYPOT, "hypot", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_HYPOTF, "hypotf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_HYPOTL, "hypotl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_ICEIL, "iceil", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_ICEILF, "iceilf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_ICEILL, "iceill", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_IFLOOR, "ifloor", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_IFLOORF, "ifloorf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_IFLOORL, "ifloorl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_ILOGB, "ilogb", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ILOGBF, "ilogbf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_ILOGBL, "ilogbl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_INF, "inf", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_INFF, "inff", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_INFL, "infl", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_INF, "inf", INF_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef INF_TYPE +DEF_GCC_BUILTIN (BUILT_IN_INFD32, "infd32", BT_FN_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN 
(BUILT_IN_INFD64, "infd64", BT_FN_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_INFD128, "infd128", BT_FN_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_IRINT, "irint", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_IRINTF, "irintf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_IRINTL, "irintl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_IROUND, "iround", BT_FN_INT_DOUBLE, ATTR_MATHFN_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_IROUNDF, "iroundf", BT_FN_INT_FLOAT, ATTR_MATHFN_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_IROUNDL, "iroundl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_J0, "j0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_J0F, "j0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_J0L, "j0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_J1, "j1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_J1F, "j1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_J1L, "j1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_JN, "jn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_JNF, "jnf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_JNL, "jnl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_LCEIL, "lceil", BT_FN_LONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LCEILF, "lceilf", BT_FN_LONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LCEILL, "lceill", BT_FN_LONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_LDEXP, "ldexp", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPF, 
"ldexpf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPL, "ldexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_LFLOOR, "lfloor", BT_FN_LONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LFLOORF, "lfloorf", BT_FN_LONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LFLOORL, "lfloorl", BT_FN_LONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_LGAMMA, "lgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_BUILTIN (BUILT_IN_LGAMMAF, "lgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_BUILTIN (BUILT_IN_LGAMMAL, "lgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_LGAMMA_R, "lgamma_r", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_LGAMMAF_R, "lgammaf_r", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_LGAMMAL_R, "lgammal_r", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_GCC_BUILTIN (BUILT_IN_LLCEIL, "llceil", BT_FN_LONGLONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LLCEILF, "llceilf", BT_FN_LONGLONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LLCEILL, "llceill", BT_FN_LONGLONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LLFLOOR, "llfloor", BT_FN_LONGLONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LLFLOORF, "llfloorf", BT_FN_LONGLONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LLFLOORL, "llfloorl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_LLRINT, "llrint", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LLRINTF, "llrintf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LLRINTL, "llrintl", BT_FN_LONGLONG_LONGDOUBLE, 
ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LLROUND, "llround", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LLROUNDF, "llroundf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LLROUNDL, "llroundl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_LOG, "log", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_LOG10, "log10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10F, "log10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10L, "log10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOG1P, "log1p", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOG1PF, "log1pf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOG1PL, "log1pl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOG2, "log2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOG2F, "log2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOG2L, "log2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOGB, "logb", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOGBF, "logbf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LOGBL, "logbl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGF, "logf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGL, "logl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LRINT, "lrint", BT_FN_LONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LRINTF, "lrintf", BT_FN_LONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LRINTL, "lrintl", 
BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LROUND, "lround", BT_FN_LONG_DOUBLE, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LROUNDF, "lroundf", BT_FN_LONG_FLOAT, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_LROUNDL, "lroundl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_MODF, "modf", BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFF, "modff", BT_FN_FLOAT_FLOAT_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFL, "modfl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_BUILTIN (BUILT_IN_NAN, "nan", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_C99_BUILTIN (BUILT_IN_NANF, "nanf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_C99_BUILTIN (BUILT_IN_NANL, "nanl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +#define NAN_TYPE(F) BT_FN_##F##_CONST_STRING +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_NAN, "nan", NAN_TYPE, ATTR_CONST_NOTHROW_NONNULL) +DEF_C2X_BUILTIN (BUILT_IN_NAND32, "nand32", BT_FN_DFLOAT32_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_C2X_BUILTIN (BUILT_IN_NAND64, "nand64", BT_FN_DFLOAT64_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_C2X_BUILTIN (BUILT_IN_NAND128, "nand128", BT_FN_DFLOAT128_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_GCC_BUILTIN (BUILT_IN_NANS, "nans", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_GCC_BUILTIN (BUILT_IN_NANSF, "nansf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_GCC_BUILTIN (BUILT_IN_NANSL, "nansl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_NANS, "nans", NAN_TYPE, ATTR_CONST_NOTHROW_NONNULL) +#undef NAN_TYPE +DEF_GCC_BUILTIN (BUILT_IN_NANSD32, "nansd32", BT_FN_DFLOAT32_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_GCC_BUILTIN (BUILT_IN_NANSD64, "nansd64", BT_FN_DFLOAT64_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) 
+DEF_GCC_BUILTIN (BUILT_IN_NANSD128, "nansd128", BT_FN_DFLOAT128_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL) +DEF_C99_BUILTIN (BUILT_IN_NEARBYINT, "nearbyint", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_NEARBYINTF, "nearbyintf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_NEARBYINTL, "nearbyintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define NEARBYINT_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_NEARBYINT, "nearbyint", NEARBYINT_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef NEARBYINT_TYPE +DEF_C99_BUILTIN (BUILT_IN_NEXTAFTER, "nextafter", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERF, "nextafterf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERL, "nextafterl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARD, "nexttoward", BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDF, "nexttowardf", BT_FN_FLOAT_FLOAT_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDL, "nexttowardl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_POW, "pow", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10, "pow10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10F, "pow10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10L, "pow10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_POWF, "powf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_GCC_BUILTIN (BUILT_IN_POWI, "powi", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING) +DEF_GCC_BUILTIN (BUILT_IN_POWIF, "powif", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING) +DEF_GCC_BUILTIN (BUILT_IN_POWIL, "powil", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, 
ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_POWL, "powl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_REMAINDER, "remainder", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_REMAINDERF, "remainderf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_REMAINDERL, "remainderl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_REMQUO, "remquo", BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_BUILTIN (BUILT_IN_REMQUOF, "remquof", BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_BUILTIN (BUILT_IN_REMQUOL, "remquol", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_BUILTIN (BUILT_IN_RINT, "rint", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_RINTF, "rintf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_RINTL, "rintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +#define RINT_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_RINT, "rint", RINT_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef RINT_TYPE +DEF_C2X_BUILTIN (BUILT_IN_ROUNDEVEN, "roundeven", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C2X_BUILTIN (BUILT_IN_ROUNDEVENF, "roundevenf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C2X_BUILTIN (BUILT_IN_ROUNDEVENL, "roundevenl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_ROUND, "round", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_ROUNDF, "roundf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_ROUNDL, "roundl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define ROUND_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ROUND, "round", ROUND_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) 
+#undef ROUND_TYPE +#define ROUNDEVEN_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ROUNDEVEN, "roundeven", ROUNDEVEN_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef ROUNDEVEN_TYPE +DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALB, "scalb", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBF, "scalbf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBL, "scalbl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_SCALBLN, "scalbln", BT_FN_DOUBLE_DOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_SCALBLNF, "scalblnf", BT_FN_FLOAT_FLOAT_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_SCALBLNL, "scalblnl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_SCALBN, "scalbn", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_SCALBNF, "scalbnf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_SCALBNL, "scalbnl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBIT, "signbit", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITF, "signbitf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITL, "signbitl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITD32, "signbitd32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITD64, "signbitd64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITD128, "signbitd128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICAND, "significand", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDF, "significandf", BT_FN_FLOAT_FLOAT, 
ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDL, "significandl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_LIB_BUILTIN (BUILT_IN_SIN, "sin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOS, "sincos", BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSF, "sincosf", BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSL, "sincosl", BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) +DEF_C99_C90RES_BUILTIN (BUILT_IN_SINF, "sinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_SINH, "sinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHF, "sinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHL, "sinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_SINL, "sinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_SQRT, "sqrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTF, "sqrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTL, "sqrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +#define SQRT_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_SQRT, "sqrt", SQRT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO) +#undef SQRT_TYPE +DEF_LIB_BUILTIN (BUILT_IN_TAN, "tan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_TANF, "tanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_LIB_BUILTIN (BUILT_IN_TANH, "tanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHF, "tanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHL, "tanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, 
ATTR_MATHFN_FPROUNDING) +DEF_C99_C90RES_BUILTIN (BUILT_IN_TANL, "tanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_BUILTIN (BUILT_IN_TGAMMA, "tgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_TGAMMAF, "tgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_TGAMMAL, "tgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_C99_BUILTIN (BUILT_IN_TRUNC, "trunc", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_TRUNCF, "truncf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_TRUNCL, "truncl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +#define TRUNC_TYPE(F) BT_FN_##F##_##F +DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_TRUNC, "trunc", TRUNC_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST) +#undef TRUNC_TYPE +DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0, "y0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0F, "y0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0L, "y0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1, "y1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1F, "y1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1L, "y1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_YN, "yn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_YNF, "ynf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) +DEF_EXT_LIB_BUILTIN (BUILT_IN_YNL, "ynl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) + +/* Category: _Complex math builtins. 
*/ +DEF_C99_COMPL_BUILTIN (BUILT_IN_CABS, "cabs", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CABSF, "cabsf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CABSL, "cabsl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOS, "cacos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSF, "cacosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSH, "cacosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSHF, "cacoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSHL, "cacoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSL, "cacosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CARG, "carg", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CARGF, "cargf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CARGL, "cargl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CASIN, "casin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINF, "casinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINH, "casinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINHF, "casinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINHL, "casinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINL, "casinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) 
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATAN, "catan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANF, "catanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANH, "catanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANHF, "catanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANHL, "catanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANL, "catanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOS, "ccos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSF, "ccosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSH, "ccosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSHF, "ccoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSHL, "ccoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSL, "ccosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CEXP, "cexp", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CEXPF, "cexpf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CEXPL, "cexpl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_GCC_BUILTIN (BUILT_IN_CEXPI, "cexpi", BT_FN_COMPLEX_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_GCC_BUILTIN (BUILT_IN_CEXPIF, "cexpif", BT_FN_COMPLEX_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_GCC_BUILTIN (BUILT_IN_CEXPIL, "cexpil", BT_FN_COMPLEX_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) 
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CIMAG, "cimag", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CIMAGF, "cimagf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CIMAGL, "cimagl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CLOG, "clog", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CLOGF, "clogf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CLOGL, "clogl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_EXT_C99RES_BUILTIN (BUILT_IN_CLOG10, "clog10", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_EXT_C99RES_BUILTIN (BUILT_IN_CLOG10F, "clog10f", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_EXT_C99RES_BUILTIN (BUILT_IN_CLOG10L, "clog10l", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CONJ, "conj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CONJF, "conjf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CONJL, "conjl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CPOW, "cpow", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CPOWF, "cpowf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CPOWL, "cpowl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CPROJ, "cproj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CPROJF, "cprojf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) 
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPROJL, "cprojl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CREAL, "creal", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CREALF, "crealf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CREALL, "creall", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSIN, "csin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINF, "csinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINH, "csinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINHF, "csinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINHL, "csinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINL, "csinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSQRT, "csqrt", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSQRTF, "csqrtf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CSQRTL, "csqrtl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CTAN, "ctan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANF, "ctanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANH, "ctanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANHF, "ctanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANHL, "ctanhl", 
BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) +DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANL, "ctanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) + +/* Category: string/memory builtins. */ +DEF_EXT_LIB_BUILTIN (BUILT_IN_BCMP, "bcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_BCOPY, "bcopy", BT_FN_VOID_CONST_PTR_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_BZERO, "bzero", BT_FN_VOID_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_INDEX, "index", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_MEMCHR, "memchr", BT_FN_PTR_CONST_PTR_INT_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_MEMCMP, "memcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_MEMCPY, "memcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_MEMMOVE, "memmove", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY, "mempcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_MEMSET, "memset", BT_FN_PTR_PTR_INT_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_RINDEX, "rindex", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY, "stpcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_RETNONNULL_NOTHROW_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STPNCPY, "stpncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRCASECMP, "strcasecmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRCAT, "strcat", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRCHR, "strchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN 
(BUILT_IN_STRCMP, "strcmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRCPY, "strcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRCSPN, "strcspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_C2X_BUILTIN (BUILT_IN_STRDUP, "strdup", BT_FN_STRING_CONST_STRING, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF) +DEF_C2X_BUILTIN (BUILT_IN_STRNDUP, "strndup", BT_FN_STRING_CONST_STRING_SIZE, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRLEN, "strlen", BT_FN_SIZE_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNCASECMP, "strncasecmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRNCAT, "strncat", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRNCMP, "strncmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRNCPY, "strncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNLEN, "strnlen", BT_FN_SIZE_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRPBRK, "strpbrk", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRRCHR, "strrchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRSPN, "strspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_STRSTR, "strstr", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF) + +/* Category: stdio builtins. 
*/ +DEF_LIB_BUILTIN (BUILT_IN_FPRINTF, "fprintf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_2_3) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_UNLOCKED, "fprintf_unlocked", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_2_3) +DEF_LIB_BUILTIN (BUILT_IN_PUTC, "putc", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTC_UNLOCKED, "putc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST) +DEF_LIB_BUILTIN (BUILT_IN_FPUTC, "fputc", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTC_UNLOCKED, "fputc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST) +DEF_LIB_BUILTIN (BUILT_IN_FPUTS, "fputs", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NONNULL_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTS_UNLOCKED, "fputs_unlocked", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NONNULL_LIST) +DEF_LIB_BUILTIN (BUILT_IN_FSCANF, "fscanf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) +DEF_LIB_BUILTIN (BUILT_IN_FWRITE, "fwrite", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NONNULL_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FWRITE_UNLOCKED, "fwrite_unlocked", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NONNULL_LIST) +DEF_LIB_BUILTIN (BUILT_IN_PRINTF, "printf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) +DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_UNLOCKED, "printf_unlocked", BT_FN_INT_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_1_2) +DEF_LIB_BUILTIN (BUILT_IN_PUTCHAR, "putchar", BT_FN_INT_INT, ATTR_NULL) +DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTCHAR_UNLOCKED, "putchar_unlocked", BT_FN_INT_INT, ATTR_NULL) +DEF_LIB_BUILTIN (BUILT_IN_PUTS, "puts", BT_FN_INT_CONST_STRING, ATTR_NONNULL_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTS_UNLOCKED, "puts_unlocked", BT_FN_INT_CONST_STRING, ATTR_NONNULL_LIST) +DEF_LIB_BUILTIN (BUILT_IN_SCANF, "scanf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_SCANF_1_2) +DEF_C99_BUILTIN (BUILT_IN_SNPRINTF, "snprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_NOTHROW_3_4) + 
+DEF_LIB_BUILTIN (BUILT_IN_SPRINTF, "sprintf", BT_FN_INT_STRING_CONST_STRING_VAR, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_2_3) +DEF_LIB_BUILTIN (BUILT_IN_SSCANF, "sscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_FORMAT_SCANF_NOTHROW_2_3) +DEF_LIB_BUILTIN (BUILT_IN_VFPRINTF, "vfprintf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_NONNULL_1_FORMAT_PRINTF_2_0) +DEF_C99_BUILTIN (BUILT_IN_VFSCANF, "vfscanf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) +DEF_LIB_BUILTIN (BUILT_IN_VPRINTF, "vprintf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_1_0) +DEF_C99_BUILTIN (BUILT_IN_VSCANF, "vscanf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_1_0) +DEF_C99_BUILTIN (BUILT_IN_VSNPRINTF, "vsnprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_NOTHROW_3_0) +DEF_LIB_BUILTIN (BUILT_IN_VSPRINTF, "vsprintf", BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_2_0) +DEF_C99_BUILTIN (BUILT_IN_VSSCANF, "vsscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_NOTHROW_2_0) + +/* Category: ctype builtins. 
*/ +DEF_LIB_BUILTIN (BUILT_IN_ISALNUM, "isalnum", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISALPHA, "isalpha", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISASCII, "isascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_ISBLANK, "isblank", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISCNTRL, "iscntrl", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISDIGIT, "isdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISGRAPH, "isgraph", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISLOWER, "islower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISPRINT, "isprint", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISPUNCT, "ispunct", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISSPACE, "isspace", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISUPPER, "isupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ISXDIGIT, "isxdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_TOASCII, "toascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_TOLOWER, "tolower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_TOUPPER, "toupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST) + +/* Category: wctype builtins. 
*/ +DEF_C94_BUILTIN (BUILT_IN_ISWALNUM, "iswalnum", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWALPHA, "iswalpha", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_ISWBLANK, "iswblank", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWCNTRL, "iswcntrl", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWDIGIT, "iswdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWGRAPH, "iswgraph", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWLOWER, "iswlower", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWPRINT, "iswprint", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWPUNCT, "iswpunct", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWSPACE, "iswspace", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWUPPER, "iswupper", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_ISWXDIGIT, "iswxdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_TOWLOWER, "towlower", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_C94_BUILTIN (BUILT_IN_TOWUPPER, "towupper", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST) + +/* Category: integer overflow checking builtins. 
*/ +DEF_GCC_BUILTIN (BUILT_IN_ADD_OVERFLOW, "add_overflow", BT_FN_BOOL_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_SUB_OVERFLOW, "sub_overflow", BT_FN_BOOL_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_MUL_OVERFLOW, "mul_overflow", BT_FN_BOOL_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ADD_OVERFLOW_P, "add_overflow_p", BT_FN_BOOL_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_SUB_OVERFLOW_P, "sub_overflow_p", BT_FN_BOOL_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_MUL_OVERFLOW_P, "mul_overflow_p", BT_FN_BOOL_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +/* Clang compatibility. */ +DEF_GCC_BUILTIN (BUILT_IN_SADD_OVERFLOW, "sadd_overflow", BT_FN_BOOL_INT_INT_INTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SADDL_OVERFLOW, "saddl_overflow", BT_FN_BOOL_LONG_LONG_LONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SADDLL_OVERFLOW, "saddll_overflow", BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SSUB_OVERFLOW, "ssub_overflow", BT_FN_BOOL_INT_INT_INTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SSUBL_OVERFLOW, "ssubl_overflow", BT_FN_BOOL_LONG_LONG_LONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SSUBLL_OVERFLOW, "ssubll_overflow", BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SMUL_OVERFLOW, "smul_overflow", BT_FN_BOOL_INT_INT_INTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SMULL_OVERFLOW, "smull_overflow", BT_FN_BOOL_LONG_LONG_LONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SMULLL_OVERFLOW, "smulll_overflow", BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UADD_OVERFLOW, "uadd_overflow", BT_FN_BOOL_UINT_UINT_UINTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN 
(BUILT_IN_UADDL_OVERFLOW, "uaddl_overflow", BT_FN_BOOL_ULONG_ULONG_ULONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UADDLL_OVERFLOW, "uaddll_overflow", BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_USUB_OVERFLOW, "usub_overflow", BT_FN_BOOL_UINT_UINT_UINTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_USUBL_OVERFLOW, "usubl_overflow", BT_FN_BOOL_ULONG_ULONG_ULONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_USUBLL_OVERFLOW, "usubll_overflow", BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UMUL_OVERFLOW, "umul_overflow", BT_FN_BOOL_UINT_UINT_UINTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UMULL_OVERFLOW, "umull_overflow", BT_FN_BOOL_ULONG_ULONG_ULONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UMULLL_OVERFLOW, "umulll_overflow", BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST) + +/* Category: miscellaneous builtins. 
*/ +DEF_LIB_BUILTIN (BUILT_IN_ABORT, "abort", BT_FN_VOID, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_COLD_LIST) +DEF_LIB_BUILTIN (BUILT_IN_ABS, "abs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_AGGREGATE_INCOMING_ADDRESS, "aggregate_incoming_address", BT_FN_PTR_VAR, ATTR_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_ALLOCA_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL) +DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_BSWAP16, "bswap16", BT_FN_UINT16_UINT16, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_BSWAP32, "bswap32", BT_FN_UINT32_UINT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_BSWAP64, "bswap64", BT_FN_UINT64_UINT64, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_BSWAP128, "bswap128", BT_FN_UINT128_UINT128, ATTR_CONST_NOTHROW_LEAF_LIST) + +DEF_EXT_LIB_BUILTIN (BUILT_IN_CLEAR_CACHE, "__clear_cache", BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST) +/* [trans-mem]: Adjust BUILT_IN_TM_CALLOC if BUILT_IN_CALLOC is changed. 
*/ +DEF_LIB_BUILTIN (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_2_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLASSIFY_TYPE, "classify_type", BT_FN_INT_VAR, ATTR_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLEAR_PADDING, "clear_padding", BT_FN_VOID_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_CLZ, "clz", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLZIMAX, "clzimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLZL, "clzl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLZLL, "clzll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CONSTANT_P, "constant_p", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CTZ, "ctz", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CTZIMAX, "ctzimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CTZL, "ctzl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CTZLL, "ctzll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLRSB, "clrsb", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLRSBIMAX, "clrsbimax", BT_FN_INT_INTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLRSBL, "clrsbl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_CLRSBLL, "clrsbll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_DCGETTEXT, "dcgettext", BT_FN_STRING_CONST_STRING_CONST_STRING_INT, ATTR_FORMAT_ARG_2) +DEF_EXT_LIB_BUILTIN (BUILT_IN_DGETTEXT, "dgettext", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_FORMAT_ARG_2) +DEF_GCC_BUILTIN (BUILT_IN_DWARF_CFA, "dwarf_cfa", BT_FN_PTR, ATTR_NULL) +DEF_GCC_BUILTIN (BUILT_IN_DWARF_SP_COLUMN, "dwarf_sp_column", BT_FN_UINT, ATTR_NULL) +DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN, "eh_return", BT_FN_VOID_PTRMODE_PTR, 
ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN_DATA_REGNO, "eh_return_data_regno", BT_FN_INT_INT, ATTR_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECL, "execl", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_SENTINEL_NOTHROW_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLP, "execlp", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_SENTINEL_NOTHROW_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLE, "execle", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_SENTINEL_1) +DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECV, "execv", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVP, "execvp", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVE, "execve", BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) +DEF_LIB_BUILTIN (BUILT_IN_EXIT, "exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) +DEF_GCC_BUILTIN (BUILT_IN_EXPECT, "expect", BT_FN_LONG_LONG_LONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_EXPECT_WITH_PROBABILITY, "expect_with_probability", BT_FN_LONG_LONG_LONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_ASSUME_ALIGNED, "assume_aligned", BT_FN_PTR_CONST_PTR_SIZE_VAR, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_EXTEND_POINTER, "extend_pointer", BT_FN_UNWINDWORD_PTR, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_EXTRACT_RETURN_ADDR, "extract_return_addr", BT_FN_PTR_PTR, ATTR_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FFS, "ffs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSIMAX, "ffsimax", BT_FN_INT_INTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST) +DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", 
BT_FN_PTR_UINT, ATTR_NULL) +/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed. */ +DEF_LIB_BUILTIN (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL) +DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1) +DEF_C99_BUILTIN (BUILT_IN_IMAXABS, "imaxabs", BT_FN_INTMAX_INTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_INIT_DWARF_REG_SIZES, "init_dwarf_reg_size_table", BT_FN_VOID_PTR, ATTR_NULL) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITE, "finite", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEF, "finitef", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEL, "finitel", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITED32, "finited32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITED64, "finited64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITED128, "finited128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_FPCLASSIFY, "fpclassify", BT_FN_INT_INT_INT_INT_INT_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISFINITE, "isfinite", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISINF_SIGN, "isinf_sign", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ISINF, "isinf", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFF, "isinff", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFL, "isinfl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFD32, "isinfd32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFD64, "isinfd64", BT_FN_INT_DFLOAT64, 
ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFD128, "isinfd128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_C90RES_BUILTIN (BUILT_IN_ISNAN, "isnan", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANF, "isnanf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANL, "isnanl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNAND32, "isnand32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNAND64, "isnand64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNAND128, "isnand128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_ISNORMAL, "isnormal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISGREATER, "isgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISGREATEREQUAL, "isgreaterequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISLESS, "isless", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISLESSEQUAL, "islessequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISLESSGREATER, "islessgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF) +DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LIST) +/* [trans-mem]: Adjust BUILT_IN_TM_MALLOC if BUILT_IN_MALLOC is changed. 
*/ +DEF_LIB_BUILTIN (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, ATTR_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_PARITY, "parity", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_PARITYIMAX, "parityimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_PARITYL, "parityl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_PARITYLL, "parityll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_POPCOUNT, "popcount", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTIMAX, "popcountimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_POSIX_MEMALIGN, "posix_memalign", BT_FN_INT_PTRPTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_GCC_BUILTIN (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST) +DEF_LIB_BUILTIN (BUILT_IN_REALLOC, "realloc", BT_FN_PTR_PTR_SIZE, ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UINT, ATTR_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SAVEREGS, "saveregs", BT_FN_PTR_VAR, ATTR_NULL) +DEF_GCC_BUILTIN (BUILT_IN_SETJMP, "setjmp", BT_FN_INT_PTR, ATTR_RT_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRFMON, "strfmon", BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_STRFMON_NOTHROW_3_4) +DEF_LIB_BUILTIN (BUILT_IN_STRFTIME, "strftime", BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_TM_PTR, ATTR_FORMAT_STRFTIME_NOTHROW_3_0) +DEF_GCC_BUILTIN (BUILT_IN_TRAP, "trap", BT_FN_VOID, 
ATTR_NORETURN_NOTHROW_LEAF_COLD_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UNREACHABLE, "unreachable", BT_FN_VOID, ATTR_CONST_NORETURN_NOTHROW_LEAF_COLD_LIST) +DEF_GCC_BUILTIN (BUILT_IN_UNWIND_INIT, "unwind_init", BT_FN_VOID, ATTR_NULL) +DEF_GCC_BUILTIN (BUILT_IN_UPDATE_SETJMP_BUF, "update_setjmp_buf", BT_FN_VOID_PTR, ATTR_NULL) +DEF_GCC_BUILTIN (BUILT_IN_VA_COPY, "va_copy", BT_FN_VOID_VALIST_REF_VALIST_ARG, ATTR_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_VA_END, "va_end", BT_FN_VOID_VALIST_REF, ATTR_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_VA_START, "va_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_VA_ARG_PACK, "va_arg_pack", BT_FN_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_VA_ARG_PACK_LEN, "va_arg_pack_len", BT_FN_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN__EXIT, "_exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LEAF_LIST) +DEF_C99_BUILTIN (BUILT_IN__EXIT2, "_Exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LEAF_LIST) + +/* Implementing nested functions. */ +DEF_BUILTIN_STUB (BUILT_IN_INIT_TRAMPOLINE, "__builtin_init_trampoline") +DEF_BUILTIN_STUB (BUILT_IN_INIT_HEAP_TRAMPOLINE, "__builtin_init_heap_trampoline") +DEF_BUILTIN_STUB (BUILT_IN_ADJUST_TRAMPOLINE, "__builtin_adjust_trampoline") +DEF_BUILTIN_STUB (BUILT_IN_INIT_DESCRIPTOR, "__builtin_init_descriptor") +DEF_BUILTIN_STUB (BUILT_IN_ADJUST_DESCRIPTOR, "__builtin_adjust_descriptor") +DEF_BUILTIN_STUB (BUILT_IN_NONLOCAL_GOTO, "__builtin_nonlocal_goto") + +/* Implementing __builtin_setjmp. */ +DEF_BUILTIN_STUB (BUILT_IN_SETJMP_SETUP, "__builtin_setjmp_setup") +DEF_BUILTIN_STUB (BUILT_IN_SETJMP_RECEIVER, "__builtin_setjmp_receiver") + +/* Implementing variable sized local variables. 
*/ +DEF_BUILTIN_STUB (BUILT_IN_STACK_SAVE, "__builtin_stack_save") +DEF_BUILTIN_STUB (BUILT_IN_STACK_RESTORE, "__builtin_stack_restore") +DEF_BUILTIN_STUB (BUILT_IN_ALLOCA_WITH_ALIGN, "__builtin_alloca_with_align") +DEF_BUILTIN_STUB (BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX, "__builtin_alloca_with_align_and_max") + +/* An internal version of memcmp, used when the result is only tested for + equality with zero. */ +DEF_BUILTIN_STUB (BUILT_IN_MEMCMP_EQ, "__builtin_memcmp_eq") + +/* An internal version of strcmp/strncmp, used when the result is only + tested for equality with zero. */ +DEF_BUILTIN_STUB (BUILT_IN_STRCMP_EQ, "__builtin_strcmp_eq") +DEF_BUILTIN_STUB (BUILT_IN_STRNCMP_EQ, "__builtin_strncmp_eq") + +/* Object size checking builtins. */ +DEF_GCC_BUILTIN (BUILT_IN_OBJECT_SIZE, "object_size", BT_FN_SIZE_CONST_PTR_INT, ATTR_CONST_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_DYNAMIC_OBJECT_SIZE, "dynamic_object_size", BT_FN_SIZE_CONST_PTR_INT, ATTR_PURE_NOTHROW_LEAF_LIST) +DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMCPY_CHK, "__memcpy_chk", BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMMOVE_CHK, "__memmove_chk", BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY_CHK, "__mempcpy_chk", BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMSET_CHK, "__memset_chk", BT_FN_PTR_PTR_INT_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY_CHK, "__stpcpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STPNCPY_CHK, "__stpncpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRCAT_CHK, "__strcat_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRCPY_CHK, "__strcpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF) 
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNCAT_CHK, "__strncat_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNCPY_CHK, "__strncpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SNPRINTF_CHK, "__snprintf_chk", BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_NOTHROW_5_6) +DEF_EXT_LIB_BUILTIN (BUILT_IN_SPRINTF_CHK, "__sprintf_chk", BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VAR, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_4_5) +DEF_EXT_LIB_BUILTIN (BUILT_IN_VSNPRINTF_CHK, "__vsnprintf_chk", BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_NOTHROW_5_0) +DEF_EXT_LIB_BUILTIN (BUILT_IN_VSPRINTF_CHK, "__vsprintf_chk", BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VALIST_ARG, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_4_0) +DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_CHK, "__fprintf_chk", BT_FN_INT_FILEPTR_INT_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_3_4) +DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_CHK, "__printf_chk", BT_FN_INT_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) +DEF_EXT_LIB_BUILTIN (BUILT_IN_VFPRINTF_CHK, "__vfprintf_chk", BT_FN_INT_FILEPTR_INT_CONST_STRING_VALIST_ARG, ATTR_NONNULL_1_FORMAT_PRINTF_3_0) +DEF_EXT_LIB_BUILTIN (BUILT_IN_VPRINTF_CHK, "__vprintf_chk", BT_FN_INT_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) + +/* Profiling hooks. */ +DEF_BUILTIN (BUILT_IN_PROFILE_FUNC_ENTER, "__cyg_profile_func_enter", BUILT_IN_NORMAL, BT_FN_VOID_PTR_PTR, BT_LAST, + false, false, false, ATTR_NULL, true, true) +DEF_BUILTIN (BUILT_IN_PROFILE_FUNC_EXIT, "__cyg_profile_func_exit", BUILT_IN_NORMAL, BT_FN_VOID_PTR_PTR, BT_LAST, + false, false, false, ATTR_NULL, true, true) + +/* TLS thread pointer related builtins. 
*/ +DEF_BUILTIN (BUILT_IN_THREAD_POINTER, "__builtin_thread_pointer", + BUILT_IN_NORMAL, BT_FN_PTR, BT_LAST, + false, false, true, ATTR_CONST_NOTHROW_LIST, true, + targetm.have_tls) + +DEF_BUILTIN (BUILT_IN_SET_THREAD_POINTER, "__builtin_set_thread_pointer", + BUILT_IN_NORMAL, BT_FN_VOID_PTR, BT_LAST, + false, false, true, ATTR_NOTHROW_LIST, true, + targetm.have_tls) + +/* TLS emulation. */ +DEF_BUILTIN (BUILT_IN_EMUTLS_GET_ADDRESS, targetm.emutls.get_address, + BUILT_IN_NORMAL, + BT_FN_PTR_PTR, BT_FN_PTR_PTR, + true, true, true, ATTR_CONST_NOTHROW_NONNULL_LEAF, false, + !targetm.have_tls) +DEF_BUILTIN (BUILT_IN_EMUTLS_REGISTER_COMMON, + targetm.emutls.register_common, BUILT_IN_NORMAL, + BT_FN_VOID_PTR_WORD_WORD_PTR, BT_FN_VOID_PTR_WORD_WORD_PTR, + true, true, true, ATTR_NOTHROW_LEAF_LIST, false, + !targetm.have_tls) + +/* Suppressing speculation. Users are expected to use the first (N) + variant, which will be translated internally into one of the other + types. */ + +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_N, "speculation_safe_value", + BT_FN_VOID_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST) + +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_PTR, + "speculation_safe_value_ptr", BT_FN_PTR_PTR_VAR, + ATTR_NOVOPS_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_1, "speculation_safe_value_1", + BT_FN_I1_I1_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_2, "speculation_safe_value_2", + BT_FN_I2_I2_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_4, "speculation_safe_value_4", + BT_FN_I4_I4_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_8, "speculation_safe_value_8", + BT_FN_I8_I8_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_16, + "speculation_safe_value_16", BT_FN_I16_I16_VAR, + ATTR_NOVOPS_NOTHROW_LEAF_LIST) + +/* Exception support. 
*/ +DEF_BUILTIN_STUB (BUILT_IN_UNWIND_RESUME, "__builtin_unwind_resume") +DEF_BUILTIN_STUB (BUILT_IN_CXA_END_CLEANUP, "__builtin_cxa_end_cleanup") +DEF_BUILTIN_STUB (BUILT_IN_EH_POINTER, "__builtin_eh_pointer") +DEF_BUILTIN_STUB (BUILT_IN_EH_FILTER, "__builtin_eh_filter") +DEF_BUILTIN_STUB (BUILT_IN_EH_COPY_VALUES, "__builtin_eh_copy_values") + +/* __FILE__, __LINE__, __FUNCTION__ as builtins. */ +DEF_GCC_BUILTIN (BUILT_IN_FILE, "FILE", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_FUNCTION, "FUNCTION", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST) +DEF_GCC_BUILTIN (BUILT_IN_LINE, "LINE", BT_FN_INT, ATTR_NOTHROW_LEAF_LIST) + +/* Synchronization Primitives. */ +#include "sync-builtins.def" + +/* Offloading and Multi Processing builtins. */ +#include "omp-builtins.def" + +/* GTM builtins. */ +#include "gtm-builtins.def" + +/* Sanitizer builtins. */ +#include "sanitizer.def" + +/* Coroutine builtins. */ +#include "coroutine-builtins.def" + +#undef DEF_BUILTIN diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtins.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtins.h new file mode 100644 index 0000000..5ad830c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/builtins.h @@ -0,0 +1,160 @@ +/* Expand builtin functions. + Copyright (C) 1988-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_BUILTINS_H +#define GCC_BUILTINS_H + +#include + +/* Target-dependent globals. */ +struct target_builtins { + /* For each register that may be used for calling a function, this + gives a mode used to copy the register's value. VOIDmode indicates + the register is not used for calling a function. If the machine + has register windows, this gives only the outbound registers. + INCOMING_REGNO gives the corresponding inbound register. */ + fixed_size_mode_pod x_apply_args_mode[FIRST_PSEUDO_REGISTER]; + + /* For each register that may be used for returning values, this gives + a mode used to copy the register's value. VOIDmode indicates the + register is not used for returning values. If the machine has + register windows, this gives only the outbound registers. + INCOMING_REGNO gives the corresponding inbound register. */ + fixed_size_mode_pod x_apply_result_mode[FIRST_PSEUDO_REGISTER]; +}; + +extern struct target_builtins default_target_builtins; +#if SWITCHABLE_TARGET +extern struct target_builtins *this_target_builtins; +#else +#define this_target_builtins (&default_target_builtins) +#endif + +/* Non-zero if __builtin_constant_p should be folded right away. */ +extern bool force_folding_builtin_constant_p; + +extern bool called_as_built_in (tree); +extern bool get_object_alignment_1 (tree, unsigned int *, + unsigned HOST_WIDE_INT *); +extern bool get_object_alignment_2 (tree, unsigned int *, + unsigned HOST_WIDE_INT *, bool); +extern unsigned int get_object_alignment (tree); +extern bool get_pointer_alignment_1 (tree, unsigned int *, + unsigned HOST_WIDE_INT *); +extern unsigned int get_pointer_alignment (tree); +extern unsigned string_length (const void*, unsigned, unsigned); + +struct c_strlen_data +{ + /* [MINLEN, MAXBOUND, MAXLEN] is a range describing the length of + one or more strings of possibly unknown length. For a single + string of known length the range is a constant where + MINLEN == MAXBOUND == MAXLEN holds. 
+ For other strings, MINLEN is the length of the shortest known + string. MAXBOUND is the length of a string that could be stored + in the largest array referenced by the expression. MAXLEN is + the length of the longest sequence of non-zero bytes + in an object referenced by the expression. For such strings, + MINLEN <= MAXBOUND <= MAXLEN holds. For example, given: + struct A { char a[7], b[]; }; + extern struct A *p; + n = strlen (p->a); + the computed range will be [0, 6, ALL_ONES]. + However, for a conditional expression involving a string + of known length and an array of unknown bound such as + n = strlen (i ? p->b : "123"); + the range will be [3, 3, ALL_ONES]. + MINLEN != 0 && MAXLEN == ALL_ONES indicates that MINLEN is + the length of the shortest known string and implies that + the shortest possible string referenced by the expression may + actually be the empty string. This distinction is useful for + diagnostics. get_range_strlen() return value distinguishes + between these two cases. + As the tighter (and more optimistic) bound, MAXBOUND is suitable + for diagnostics but not for optimization. + As the more conservative bound, MAXLEN is intended to be used + for optimization. */ + tree minlen; + tree maxlen; + tree maxbound; + /* When non-null, DECL refers to the declaration known to store + an unterminated constant character array, as in: + const char s[] = { 'a', 'b', 'c' }; + It is used to diagnose uses of such arrays in functions such as + strlen() that expect a nul-terminated string as an argument. */ + tree decl; + /* Non-constant offset from the beginning of a string not accounted + for in the length range. Used to improve diagnostics. 
*/ + tree off; +}; + +extern tree c_strlen (tree, int, c_strlen_data * = NULL, unsigned = 1); +extern rtx c_readstr (const char *, scalar_int_mode, bool = true); +extern void expand_builtin_setjmp_setup (rtx, rtx); +extern void expand_builtin_setjmp_receiver (rtx); +extern void expand_builtin_update_setjmp_buf (rtx); +extern tree mathfn_built_in (tree, enum built_in_function fn); +extern tree mathfn_built_in (tree, combined_fn); +extern tree mathfn_built_in_type (combined_fn); +extern rtx builtin_strncpy_read_str (void *, void *, HOST_WIDE_INT, + fixed_size_mode); +extern rtx builtin_memset_read_str (void *, void *, HOST_WIDE_INT, + fixed_size_mode); +extern rtx expand_builtin_memset (tree, rtx, machine_mode); +extern rtx expand_builtin_saveregs (void); +extern tree std_build_builtin_va_list (void); +extern tree std_fn_abi_va_list (tree); +extern tree std_canonical_va_list_type (tree); +extern void std_expand_builtin_va_start (tree, rtx); +extern void expand_builtin_trap (void); +extern void expand_ifn_atomic_bit_test_and (gcall *); +extern void expand_ifn_atomic_compare_exchange (gcall *); +extern void expand_ifn_atomic_op_fetch_cmp_0 (gcall *); +extern rtx expand_builtin (tree, rtx, rtx, machine_mode, int); +extern enum built_in_function builtin_mathfn_code (const_tree); +extern tree fold_builtin_expect (location_t, tree, tree, tree, tree); +extern bool avoid_folding_inline_builtin (tree); +extern tree fold_call_expr (location_t, tree, bool); +extern tree fold_builtin_call_array (location_t, tree, tree, int, tree *); +extern bool validate_gimple_arglist (const gcall *, ...); +extern rtx default_expand_builtin (tree, rtx, rtx, machine_mode, int); +extern void maybe_emit_call_builtin___clear_cache (rtx, rtx); +extern bool fold_builtin_next_arg (tree, bool); +extern tree do_mpc_arg2 (tree, tree, tree, int, int (*)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t)); +extern tree fold_call_stmt (gcall *, bool); +extern void set_builtin_user_assembler_name (tree decl, const 
char *asmspec); +extern bool is_simple_builtin (tree); +extern bool is_inexpensive_builtin (tree); +extern bool readonly_data_expr (tree exp); +extern bool init_target_chars (void); +extern unsigned HOST_WIDE_INT target_newline; +extern unsigned HOST_WIDE_INT target_percent; +extern char target_percent_s[3]; +extern char target_percent_c[3]; +extern char target_percent_s_newline[4]; +extern bool target_char_cst_p (tree t, char *p); +extern rtx get_memory_rtx (tree exp, tree len); + +extern internal_fn associated_internal_fn (combined_fn, tree); +extern internal_fn associated_internal_fn (tree); +extern internal_fn replacement_internal_fn (gcall *); + +extern bool builtin_with_linkage_p (tree); + +#endif /* GCC_BUILTINS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bversion.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bversion.h new file mode 100644 index 0000000..21f581b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/bversion.h @@ -0,0 +1,4 @@ +#define BUILDING_GCC_MAJOR 12 +#define BUILDING_GCC_MINOR 2 +#define BUILDING_GCC_PATCHLEVEL 0 +#define BUILDING_GCC_VERSION (BUILDING_GCC_MAJOR * 1000 + BUILDING_GCC_MINOR) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-common.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-common.def new file mode 100644 index 0000000..0759ace --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-common.def @@ -0,0 +1,89 @@ +/* This file contains the definitions and documentation for the + additional tree codes used in the GNU C compiler (see tree.def + for the standard codes). + Copyright (C) 1987-2022 Free Software Foundation, Inc. + Written by Benjamin Chelf + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Tree nodes used in the C frontend. These are also shared with the + C++ and Objective C frontends. */ + +/* A C_MAYBE_CONST_EXPR, currently only used for C and Objective C, + tracks information about constancy of an expression and VLA type + sizes or VM expressions from typeof that need to be evaluated + before the main expression. It is used during parsing and removed + in c_fully_fold. C_MAYBE_CONST_EXPR_PRE is the expression to + evaluate first, if not NULL; C_MAYBE_CONST_EXPR_EXPR is the main + expression. If C_MAYBE_CONST_EXPR_INT_OPERANDS is set then the + expression may be used in an unevaluated part of an integer + constant expression, but not in an evaluated part. If + C_MAYBE_CONST_EXPR_NON_CONST is set then the expression contains + something that cannot occur in an evaluated part of a constant + expression (or outside of sizeof in C90 mode); otherwise it does + not. */ +DEFTREECODE (C_MAYBE_CONST_EXPR, "c_maybe_const_expr", tcc_expression, 2) + +/* An EXCESS_PRECISION_EXPR, currently only used for C and Objective + C, represents an expression evaluated in greater range or precision + than its type. The type of the EXCESS_PRECISION_EXPR is the + semantic type while the operand represents what is actually being + evaluated. */ +DEFTREECODE (EXCESS_PRECISION_EXPR, "excess_precision_expr", tcc_expression, 1) + +/* Used to represent a user-defined literal. + The operands are an IDENTIFIER for the suffix, the VALUE of the literal, + and for numeric literals the original string representation of the + number. 
*/ +DEFTREECODE (USERDEF_LITERAL, "userdef_literal", tcc_exceptional, 3) + +/* Represents a 'sizeof' expression during C++ template expansion, + or for the purpose of -Wsizeof-pointer-memaccess warning. */ +DEFTREECODE (SIZEOF_EXPR, "sizeof_expr", tcc_expression, 1) + +/* Like above, but enclosed in parentheses. Used to suppress warnings. */ +DEFTREECODE (PAREN_SIZEOF_EXPR, "paren_sizeof_expr", tcc_expression, 1) + +/* Used to represent a `for' statement. The operands are + FOR_INIT_STMT, FOR_COND, FOR_EXPR, FOR_BODY, and FOR_SCOPE, + respectively. */ +DEFTREECODE (FOR_STMT, "for_stmt", tcc_statement, 5) + +/* Used to represent a 'while' statement. The operands are WHILE_COND + and WHILE_BODY, respectively. */ +DEFTREECODE (WHILE_STMT, "while_stmt", tcc_statement, 2) + +/* Used to represent a 'do' statement. The operands are DO_COND and + DO_BODY, respectively. */ +DEFTREECODE (DO_STMT, "do_stmt", tcc_statement, 2) + +/* Used to represent a 'break' statement. */ +DEFTREECODE (BREAK_STMT, "break_stmt", tcc_statement, 0) + +/* Used to represent a 'continue' statement. */ +DEFTREECODE (CONTINUE_STMT, "continue_stmt", tcc_statement, 0) + +/* Used to represent a 'switch' statement. The operands are + SWITCH_STMT_COND, SWITCH_STMT_BODY, SWITCH_STMT_TYPE, and + SWITCH_STMT_SCOPE, respectively. */ +DEFTREECODE (SWITCH_STMT, "switch_stmt", tcc_statement, 4) + +/* +Local variables: +mode:c +End: +*/ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-common.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-common.h new file mode 100644 index 0000000..52a85bf --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-common.h @@ -0,0 +1,1527 @@ +/* Definitions for c-common.cc. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_C_COMMON_H +#define GCC_C_COMMON_H + +#include "splay-tree.h" +#include "cpplib.h" +#include "alias.h" +#include "tree.h" +#include "fold-const.h" +#include "wide-int-bitmask.h" + +/* In order for the format checking to accept the C frontend + diagnostic framework extensions, you must include this file before + diagnostic-core.h, not after. The C front end formats are a subset of those + for C++, so they are the appropriate set to use in common code; + cp-tree.h overrides this for C++. */ +#if defined(GCC_DIAGNOSTIC_CORE_H) +#error \ +In order for the format checking to accept the C front end diagnostic \ +framework extensions, you must include this file before diagnostic-core.h \ +never after. +#endif +#ifndef GCC_DIAG_STYLE +#define GCC_DIAG_STYLE __gcc_cdiag__ +#endif +#include "diagnostic-core.h" + +/* Usage of TREE_LANG_FLAG_?: + 0: IDENTIFIER_MARKED (used by search routines). + C_MAYBE_CONST_EXPR_INT_OPERANDS (in C_MAYBE_CONST_EXPR, for C) + 1: C_DECLARED_LABEL_FLAG (in LABEL_DECL) + STATEMENT_LIST_STMT_EXPR (in STATEMENT_LIST) + C_MAYBE_CONST_EXPR_NON_CONST (in C_MAYBE_CONST_EXPR, for C) + 2: unused + 3: STATEMENT_LIST_HAS_LABEL (in STATEMENT_LIST) + 4: unused +*/ + +/* Reserved identifiers. This is the union of all the keywords for C, + C++, and Objective-C. All the type modifiers have to be in one + block at the beginning, because they are used as mask bits. 
There + are 28 type modifiers; if we add many more we will have to redesign + the mask mechanism. */ + +enum rid +{ + /* Modifiers: */ + /* C, in empirical order of frequency. */ + RID_STATIC = 0, + RID_UNSIGNED, RID_LONG, RID_CONST, RID_EXTERN, + RID_REGISTER, RID_TYPEDEF, RID_SHORT, RID_INLINE, + RID_VOLATILE, RID_SIGNED, RID_AUTO, RID_RESTRICT, + RID_NORETURN, RID_ATOMIC, + + /* C extensions */ + RID_COMPLEX, RID_THREAD, RID_SAT, + + /* C++ */ + RID_FRIEND, RID_VIRTUAL, RID_EXPLICIT, RID_EXPORT, RID_MUTABLE, + + /* ObjC ("PQ" reserved words - they do not appear after a '@' and + are keywords only in specific contexts) */ + RID_IN, RID_OUT, RID_INOUT, RID_BYCOPY, RID_BYREF, RID_ONEWAY, + + /* ObjC ("PATTR" reserved words - they do not appear after a '@' + and are keywords only as property attributes) */ + RID_GETTER, RID_SETTER, + RID_READONLY, RID_READWRITE, + RID_ASSIGN, RID_RETAIN, RID_COPY, + RID_PROPATOMIC, RID_NONATOMIC, + + /* ObjC nullability support keywords that also can appear in the + property attribute context. These values should remain contiguous + with the other property attributes. 
*/ + RID_NULL_UNSPECIFIED, RID_NULLABLE, RID_NONNULL, RID_NULL_RESETTABLE, + + /* C (reserved and imaginary types not implemented, so any use is a + syntax error) */ + RID_IMAGINARY, + + /* C */ + RID_INT, RID_CHAR, RID_FLOAT, RID_DOUBLE, RID_VOID, + RID_ENUM, RID_STRUCT, RID_UNION, RID_IF, RID_ELSE, + RID_WHILE, RID_DO, RID_FOR, RID_SWITCH, RID_CASE, + RID_DEFAULT, RID_BREAK, RID_CONTINUE, RID_RETURN, RID_GOTO, + RID_SIZEOF, + + /* C extensions */ + RID_ASM, RID_TYPEOF, RID_ALIGNOF, RID_ATTRIBUTE, RID_VA_ARG, + RID_EXTENSION, RID_IMAGPART, RID_REALPART, RID_LABEL, RID_CHOOSE_EXPR, + RID_TYPES_COMPATIBLE_P, RID_BUILTIN_COMPLEX, RID_BUILTIN_SHUFFLE, + RID_BUILTIN_SHUFFLEVECTOR, RID_BUILTIN_CONVERTVECTOR, RID_BUILTIN_TGMATH, + RID_BUILTIN_HAS_ATTRIBUTE, RID_BUILTIN_ASSOC_BARRIER, + RID_DFLOAT32, RID_DFLOAT64, RID_DFLOAT128, + + /* TS 18661-3 keywords, in the same sequence as the TI_* values. */ + RID_FLOAT16, + RID_FLOATN_NX_FIRST = RID_FLOAT16, + RID_FLOAT32, + RID_FLOAT64, + RID_FLOAT128, + RID_FLOAT32X, + RID_FLOAT64X, + RID_FLOAT128X, +#define CASE_RID_FLOATN_NX \ + case RID_FLOAT16: case RID_FLOAT32: case RID_FLOAT64: case RID_FLOAT128: \ + case RID_FLOAT32X: case RID_FLOAT64X: case RID_FLOAT128X + + RID_FRACT, RID_ACCUM, RID_AUTO_TYPE, RID_BUILTIN_CALL_WITH_STATIC_CHAIN, + + /* "__GIMPLE", for the GIMPLE-parsing extension to the C frontend. */ + RID_GIMPLE, + + /* "__PHI", for parsing PHI function in GIMPLE FE. */ + RID_PHI, + + /* "__RTL", for the RTL-parsing extension to the C frontend. */ + RID_RTL, + + /* C11 */ + RID_ALIGNAS, RID_GENERIC, + + /* This means to warn that this is a C++ keyword, and then treat it + as a normal identifier. 
*/ + RID_CXX_COMPAT_WARN, + + /* GNU transactional memory extension */ + RID_TRANSACTION_ATOMIC, RID_TRANSACTION_RELAXED, RID_TRANSACTION_CANCEL, + + /* Too many ways of getting the name of a function as a string */ + RID_FUNCTION_NAME, RID_PRETTY_FUNCTION_NAME, RID_C99_FUNCTION_NAME, + + /* C++ (some of these are keywords in Objective-C as well, but only + if they appear after a '@') */ + RID_BOOL, RID_WCHAR, RID_CLASS, + RID_PUBLIC, RID_PRIVATE, RID_PROTECTED, + RID_TEMPLATE, RID_NULL, RID_CATCH, + RID_DELETE, RID_FALSE, RID_NAMESPACE, + RID_NEW, RID_OFFSETOF, RID_OPERATOR, + RID_THIS, RID_THROW, RID_TRUE, + RID_TRY, RID_TYPENAME, RID_TYPEID, + RID_USING, RID_CHAR16, RID_CHAR32, + + /* casts */ + RID_CONSTCAST, RID_DYNCAST, RID_REINTCAST, RID_STATCAST, + + /* C++ extensions */ + RID_ADDRESSOF, RID_BASES, + RID_BUILTIN_LAUNDER, RID_DIRECT_BASES, + RID_HAS_NOTHROW_ASSIGN, RID_HAS_NOTHROW_CONSTRUCTOR, + RID_HAS_NOTHROW_COPY, RID_HAS_TRIVIAL_ASSIGN, + RID_HAS_TRIVIAL_CONSTRUCTOR, RID_HAS_TRIVIAL_COPY, + RID_HAS_TRIVIAL_DESTRUCTOR, RID_HAS_UNIQUE_OBJ_REPRESENTATIONS, + RID_HAS_VIRTUAL_DESTRUCTOR, RID_BUILTIN_BIT_CAST, + RID_IS_ABSTRACT, RID_IS_AGGREGATE, + RID_IS_BASE_OF, RID_IS_CLASS, + RID_IS_EMPTY, RID_IS_ENUM, + RID_IS_FINAL, RID_IS_LAYOUT_COMPATIBLE, + RID_IS_LITERAL_TYPE, + RID_IS_POINTER_INTERCONVERTIBLE_BASE_OF, + RID_IS_POD, RID_IS_POLYMORPHIC, + RID_IS_SAME_AS, + RID_IS_STD_LAYOUT, RID_IS_TRIVIAL, + RID_IS_TRIVIALLY_ASSIGNABLE, RID_IS_TRIVIALLY_CONSTRUCTIBLE, + RID_IS_TRIVIALLY_COPYABLE, + RID_IS_UNION, RID_UNDERLYING_TYPE, + RID_IS_ASSIGNABLE, RID_IS_CONSTRUCTIBLE, + RID_IS_NOTHROW_ASSIGNABLE, RID_IS_NOTHROW_CONSTRUCTIBLE, + + /* C++11 */ + RID_CONSTEXPR, RID_DECLTYPE, RID_NOEXCEPT, RID_NULLPTR, RID_STATIC_ASSERT, + + /* C++20 */ + RID_CONSTINIT, RID_CONSTEVAL, + + /* char8_t */ + RID_CHAR8, + + /* C++ concepts */ + RID_CONCEPT, RID_REQUIRES, + + /* C++ modules. */ + RID__MODULE, RID__IMPORT, RID__EXPORT, /* Internal tokens. 
*/ + + /* C++ coroutines */ + RID_CO_AWAIT, RID_CO_YIELD, RID_CO_RETURN, + + /* C++ transactional memory. */ + RID_ATOMIC_NOEXCEPT, RID_ATOMIC_CANCEL, RID_SYNCHRONIZED, + + /* Objective-C ("AT" reserved words - they are only keywords when + they follow '@') */ + RID_AT_ENCODE, RID_AT_END, + RID_AT_CLASS, RID_AT_ALIAS, RID_AT_DEFS, + RID_AT_PRIVATE, RID_AT_PROTECTED, RID_AT_PUBLIC, RID_AT_PACKAGE, + RID_AT_PROTOCOL, RID_AT_SELECTOR, + RID_AT_THROW, RID_AT_TRY, RID_AT_CATCH, + RID_AT_FINALLY, RID_AT_SYNCHRONIZED, + RID_AT_OPTIONAL, RID_AT_REQUIRED, RID_AT_PROPERTY, + RID_AT_SYNTHESIZE, RID_AT_DYNAMIC, + RID_AT_INTERFACE, + RID_AT_IMPLEMENTATION, + + /* Named address support, mapping the keyword to a particular named address + number. Named address space 0 is reserved for the generic address. If + there are more than 254 named addresses, the addr_space_t type will need + to be grown from an unsigned char to unsigned short. */ + RID_ADDR_SPACE_0, /* generic address */ + RID_ADDR_SPACE_1, + RID_ADDR_SPACE_2, + RID_ADDR_SPACE_3, + RID_ADDR_SPACE_4, + RID_ADDR_SPACE_5, + RID_ADDR_SPACE_6, + RID_ADDR_SPACE_7, + RID_ADDR_SPACE_8, + RID_ADDR_SPACE_9, + RID_ADDR_SPACE_10, + RID_ADDR_SPACE_11, + RID_ADDR_SPACE_12, + RID_ADDR_SPACE_13, + RID_ADDR_SPACE_14, + RID_ADDR_SPACE_15, + + RID_FIRST_ADDR_SPACE = RID_ADDR_SPACE_0, + RID_LAST_ADDR_SPACE = RID_ADDR_SPACE_15, + + /* __intN keywords. The _N_M here doesn't correspond to the intN + in the keyword; use the bitsize in int_n_t_data_t[M] for that. + For example, if int_n_t_data_t[0].bitsize is 13, then RID_INT_N_0 + is for __int13. */ + + /* Note that the range to use is RID_FIRST_INT_N through + RID_FIRST_INT_N + NUM_INT_N_ENTS - 1 and c-parser.cc has a list of + all RID_INT_N_* in a case statement. 
*/ + + RID_INT_N_0, + RID_INT_N_1, + RID_INT_N_2, + RID_INT_N_3, + + RID_FIRST_INT_N = RID_INT_N_0, + RID_LAST_INT_N = RID_INT_N_3, + + RID_MAX, + + RID_FIRST_MODIFIER = RID_STATIC, + RID_LAST_MODIFIER = RID_ONEWAY, + + RID_FIRST_CXX11 = RID_CONSTEXPR, + RID_LAST_CXX11 = RID_STATIC_ASSERT, + RID_FIRST_CXX20 = RID_CONSTINIT, + RID_LAST_CXX20 = RID_CONSTINIT, + RID_FIRST_AT = RID_AT_ENCODE, + RID_LAST_AT = RID_AT_IMPLEMENTATION, + RID_FIRST_PQ = RID_IN, + RID_LAST_PQ = RID_ONEWAY, + RID_FIRST_PATTR = RID_GETTER, + RID_LAST_PATTR = RID_NULL_RESETTABLE +}; + +#define OBJC_IS_AT_KEYWORD(rid) \ + ((unsigned int) (rid) >= (unsigned int) RID_FIRST_AT && \ + (unsigned int) (rid) <= (unsigned int) RID_LAST_AT) + +#define OBJC_IS_PQ_KEYWORD(rid) \ + ((unsigned int) (rid) >= (unsigned int) RID_FIRST_PQ && \ + (unsigned int) (rid) <= (unsigned int) RID_LAST_PQ) + +/* Keywords permitted in an @property attribute context. */ +#define OBJC_IS_PATTR_KEYWORD(rid) \ + ((((unsigned int) (rid) >= (unsigned int) RID_FIRST_PATTR && \ + (unsigned int) (rid) <= (unsigned int) RID_LAST_PATTR)) \ + || rid == RID_CLASS) + +/* OBJC_IS_CXX_KEYWORD recognizes the 'CXX_OBJC' keywords (such as + 'class') which are shared in a subtle way between Objective-C and + C++. When the lexer is lexing in Objective-C/Objective-C++, if it + finds '@' followed by one of these identifiers (eg, '@class'), it + recognizes the whole as an Objective-C keyword. If the identifier + is found elsewhere, it follows the rules of the C/C++ language. + */ +#define OBJC_IS_CXX_KEYWORD(rid) \ + (rid == RID_CLASS || rid == RID_SYNCHRONIZED \ + || rid == RID_PUBLIC || rid == RID_PROTECTED || rid == RID_PRIVATE \ + || rid == RID_TRY || rid == RID_THROW || rid == RID_CATCH) + +/* The elements of `ridpointers' are identifier nodes for the reserved + type names and storage classes. It is indexed by a RID_... value. 
*/ +extern GTY ((length ("(int) RID_MAX"))) tree *ridpointers; + +/* Standard named or nameless data types of the C compiler. */ + +enum c_tree_index +{ + CTI_CHAR8_TYPE, + CTI_CHAR16_TYPE, + CTI_CHAR32_TYPE, + CTI_WCHAR_TYPE, + CTI_UNDERLYING_WCHAR_TYPE, + CTI_WINT_TYPE, + CTI_SIGNED_SIZE_TYPE, /* For format checking only. */ + CTI_UNSIGNED_PTRDIFF_TYPE, /* For format checking only. */ + CTI_INTMAX_TYPE, + CTI_UINTMAX_TYPE, + CTI_WIDEST_INT_LIT_TYPE, + CTI_WIDEST_UINT_LIT_TYPE, + + /* Types for , that may not be defined on all + targets. */ + CTI_SIG_ATOMIC_TYPE, + CTI_INT8_TYPE, + CTI_INT16_TYPE, + CTI_INT32_TYPE, + CTI_INT64_TYPE, + CTI_UINT8_TYPE, + CTI_UINT16_TYPE, + CTI_UINT32_TYPE, + CTI_UINT64_TYPE, + CTI_INT_LEAST8_TYPE, + CTI_INT_LEAST16_TYPE, + CTI_INT_LEAST32_TYPE, + CTI_INT_LEAST64_TYPE, + CTI_UINT_LEAST8_TYPE, + CTI_UINT_LEAST16_TYPE, + CTI_UINT_LEAST32_TYPE, + CTI_UINT_LEAST64_TYPE, + CTI_INT_FAST8_TYPE, + CTI_INT_FAST16_TYPE, + CTI_INT_FAST32_TYPE, + CTI_INT_FAST64_TYPE, + CTI_UINT_FAST8_TYPE, + CTI_UINT_FAST16_TYPE, + CTI_UINT_FAST32_TYPE, + CTI_UINT_FAST64_TYPE, + CTI_INTPTR_TYPE, + CTI_UINTPTR_TYPE, + + CTI_CHAR_ARRAY_TYPE, + CTI_CHAR8_ARRAY_TYPE, + CTI_CHAR16_ARRAY_TYPE, + CTI_CHAR32_ARRAY_TYPE, + CTI_WCHAR_ARRAY_TYPE, + CTI_STRING_TYPE, + CTI_CONST_STRING_TYPE, + + /* Type for boolean expressions (bool in C++, int in C). */ + CTI_TRUTHVALUE_TYPE, + CTI_TRUTHVALUE_TRUE, + CTI_TRUTHVALUE_FALSE, + + CTI_DEFAULT_FUNCTION_TYPE, + + CTI_NULL, + + /* These are not types, but we have to look them up all the time. */ + CTI_FUNCTION_NAME_DECL, + CTI_PRETTY_FUNCTION_NAME_DECL, + CTI_C99_FUNCTION_NAME_DECL, + + CTI_MODULE_HWM, + /* Below here entities change during compilation. 
*/ + + CTI_SAVED_FUNCTION_NAME_DECLS, + + CTI_MAX +}; + +#define C_CPP_HASHNODE(id) \ + (&(((struct c_common_identifier *) (id))->node)) +#define C_RID_CODE(id) \ + ((enum rid) (((struct c_common_identifier *) (id))->node.rid_code)) +#define C_SET_RID_CODE(id, code) \ + (((struct c_common_identifier *) (id))->node.rid_code = (unsigned char) code) + +/* Identifier part common to the C front ends. Inherits from + tree_identifier, despite appearances. */ +struct GTY(()) c_common_identifier { + struct tree_common common; + struct cpp_hashnode node; +}; + +/* An entry in the reserved keyword table. */ + +struct c_common_resword +{ + const char *const word; + ENUM_BITFIELD(rid) const rid : 16; + const unsigned int disable : 16; +}; + +/* Mode used to build pointers (VOIDmode means ptr_mode). */ + +extern machine_mode c_default_pointer_mode; + +/* Extra cpp_ttype values for C++. */ + +/* A token type for template-ids. If a template-id is processed while + parsing tentatively, it is replaced with a CPP_TEMPLATE_ID token; + the value of the CPP_TEMPLATE_ID is whatever was returned by + cp_parser_template_id. */ +#define CPP_TEMPLATE_ID ((enum cpp_ttype) (CPP_KEYWORD + 1)) + +/* A token type for nested-name-specifiers. If a + nested-name-specifier is processed while parsing tentatively, it is + replaced with a CPP_NESTED_NAME_SPECIFIER token; the value of the + CPP_NESTED_NAME_SPECIFIER is whatever was returned by + cp_parser_nested_name_specifier_opt. */ +#define CPP_NESTED_NAME_SPECIFIER ((enum cpp_ttype) (CPP_TEMPLATE_ID + 1)) + +/* A token type for pre-parsed C++0x decltype. */ +#define CPP_DECLTYPE ((enum cpp_ttype) (CPP_NESTED_NAME_SPECIFIER + 1)) + +/* A token type for pre-parsed primary-expression (lambda- or statement-). */ +#define CPP_PREPARSED_EXPR ((enum cpp_ttype) (CPP_DECLTYPE + 1)) + +/* The number of token types, including C++-specific ones. */ +#define N_CP_TTYPES ((int) (CPP_PREPARSED_EXPR + 1)) + +/* Disable mask. 
Keywords are disabled if (reswords[i].disable & + mask) is _true_. Thus for keywords which are present in all + languages the disable field is zero. */ + +#define D_CONLY 0x0001 /* C only (not in C++). */ +#define D_CXXONLY 0x0002 /* C++ only (not in C). */ +#define D_C99 0x0004 /* In C, C99 only. */ +#define D_CXX11 0x0008 /* In C++, C++11 only. */ +#define D_EXT 0x0010 /* GCC extension. */ +#define D_EXT89 0x0020 /* GCC extension incorporated in C99. */ +#define D_ASM 0x0040 /* Disabled by -fno-asm. */ +#define D_OBJC 0x0080 /* In Objective C and neither C nor C++. */ +#define D_CXX_OBJC 0x0100 /* In Objective C, and C++, but not C. */ +#define D_CXXWARN 0x0200 /* In C warn with -Wcxx-compat. */ +#define D_CXX_CONCEPTS 0x0400 /* In C++, only with concepts. */ +#define D_TRANSMEM 0X0800 /* C++ transactional memory TS. */ +#define D_CXX_CHAR8_T 0X1000 /* In C++, only with -fchar8_t. */ +#define D_CXX20 0x2000 /* In C++, C++20 only. */ +#define D_CXX_COROUTINES 0x4000 /* In C++, only with coroutines. */ +#define D_CXX_MODULES 0x8000 /* In C++, only with modules. */ + +#define D_CXX_CONCEPTS_FLAGS D_CXXONLY | D_CXX_CONCEPTS +#define D_CXX_CHAR8_T_FLAGS D_CXXONLY | D_CXX_CHAR8_T +#define D_CXX_MODULES_FLAGS (D_CXXONLY | D_CXX_MODULES) +#define D_CXX_COROUTINES_FLAGS (D_CXXONLY | D_CXX_COROUTINES) + +/* The reserved keyword table. */ +extern const struct c_common_resword c_common_reswords[]; + +/* The number of items in the reserved keyword table. 
*/ +extern const unsigned int num_c_common_reswords; + +#define char8_type_node c_global_trees[CTI_CHAR8_TYPE] +#define char16_type_node c_global_trees[CTI_CHAR16_TYPE] +#define char32_type_node c_global_trees[CTI_CHAR32_TYPE] +#define wchar_type_node c_global_trees[CTI_WCHAR_TYPE] +#define underlying_wchar_type_node c_global_trees[CTI_UNDERLYING_WCHAR_TYPE] +#define wint_type_node c_global_trees[CTI_WINT_TYPE] +#define signed_size_type_node c_global_trees[CTI_SIGNED_SIZE_TYPE] +#define unsigned_ptrdiff_type_node c_global_trees[CTI_UNSIGNED_PTRDIFF_TYPE] +#define intmax_type_node c_global_trees[CTI_INTMAX_TYPE] +#define uintmax_type_node c_global_trees[CTI_UINTMAX_TYPE] +#define widest_integer_literal_type_node c_global_trees[CTI_WIDEST_INT_LIT_TYPE] +#define widest_unsigned_literal_type_node c_global_trees[CTI_WIDEST_UINT_LIT_TYPE] + +#define sig_atomic_type_node c_global_trees[CTI_SIG_ATOMIC_TYPE] +#define int8_type_node c_global_trees[CTI_INT8_TYPE] +#define int16_type_node c_global_trees[CTI_INT16_TYPE] +#define int32_type_node c_global_trees[CTI_INT32_TYPE] +#define int64_type_node c_global_trees[CTI_INT64_TYPE] +#define uint8_type_node c_global_trees[CTI_UINT8_TYPE] +#define c_uint16_type_node c_global_trees[CTI_UINT16_TYPE] +#define c_uint32_type_node c_global_trees[CTI_UINT32_TYPE] +#define c_uint64_type_node c_global_trees[CTI_UINT64_TYPE] +#define int_least8_type_node c_global_trees[CTI_INT_LEAST8_TYPE] +#define int_least16_type_node c_global_trees[CTI_INT_LEAST16_TYPE] +#define int_least32_type_node c_global_trees[CTI_INT_LEAST32_TYPE] +#define int_least64_type_node c_global_trees[CTI_INT_LEAST64_TYPE] +#define uint_least8_type_node c_global_trees[CTI_UINT_LEAST8_TYPE] +#define uint_least16_type_node c_global_trees[CTI_UINT_LEAST16_TYPE] +#define uint_least32_type_node c_global_trees[CTI_UINT_LEAST32_TYPE] +#define uint_least64_type_node c_global_trees[CTI_UINT_LEAST64_TYPE] +#define int_fast8_type_node c_global_trees[CTI_INT_FAST8_TYPE] +#define 
int_fast16_type_node c_global_trees[CTI_INT_FAST16_TYPE] +#define int_fast32_type_node c_global_trees[CTI_INT_FAST32_TYPE] +#define int_fast64_type_node c_global_trees[CTI_INT_FAST64_TYPE] +#define uint_fast8_type_node c_global_trees[CTI_UINT_FAST8_TYPE] +#define uint_fast16_type_node c_global_trees[CTI_UINT_FAST16_TYPE] +#define uint_fast32_type_node c_global_trees[CTI_UINT_FAST32_TYPE] +#define uint_fast64_type_node c_global_trees[CTI_UINT_FAST64_TYPE] +#define intptr_type_node c_global_trees[CTI_INTPTR_TYPE] +#define uintptr_type_node c_global_trees[CTI_UINTPTR_TYPE] + +#define truthvalue_type_node c_global_trees[CTI_TRUTHVALUE_TYPE] +#define truthvalue_true_node c_global_trees[CTI_TRUTHVALUE_TRUE] +#define truthvalue_false_node c_global_trees[CTI_TRUTHVALUE_FALSE] + +#define char_array_type_node c_global_trees[CTI_CHAR_ARRAY_TYPE] +#define char8_array_type_node c_global_trees[CTI_CHAR8_ARRAY_TYPE] +#define char16_array_type_node c_global_trees[CTI_CHAR16_ARRAY_TYPE] +#define char32_array_type_node c_global_trees[CTI_CHAR32_ARRAY_TYPE] +#define wchar_array_type_node c_global_trees[CTI_WCHAR_ARRAY_TYPE] +#define string_type_node c_global_trees[CTI_STRING_TYPE] +#define const_string_type_node c_global_trees[CTI_CONST_STRING_TYPE] + +#define default_function_type c_global_trees[CTI_DEFAULT_FUNCTION_TYPE] + +#define function_name_decl_node c_global_trees[CTI_FUNCTION_NAME_DECL] +#define pretty_function_name_decl_node c_global_trees[CTI_PRETTY_FUNCTION_NAME_DECL] +#define c99_function_name_decl_node c_global_trees[CTI_C99_FUNCTION_NAME_DECL] +#define saved_function_name_decls c_global_trees[CTI_SAVED_FUNCTION_NAME_DECLS] + +/* The node for C++ `__null'. */ +#define null_node c_global_trees[CTI_NULL] + +extern GTY(()) tree c_global_trees[CTI_MAX]; + +/* Mark which labels are explicitly declared. + These may be shadowed, and may be referenced from nested functions. 
*/ +#define C_DECLARED_LABEL_FLAG(label) TREE_LANG_FLAG_1 (label) + +enum c_language_kind +{ + clk_c = 0, /* C90, C94, C99, C11 or C2X */ + clk_objc = 1, /* clk_c with ObjC features. */ + clk_cxx = 2, /* ANSI/ISO C++ */ + clk_objcxx = 3 /* clk_cxx with ObjC features. */ +}; + +/* To test for a specific language use c_language, defined by each + front end. For "ObjC features" or "not C++" use the macros. */ +extern c_language_kind c_language; + +#define c_dialect_cxx() ((c_language & clk_cxx) != 0) +#define c_dialect_objc() ((c_language & clk_objc) != 0) + +/* The various name of operator that appears in error messages. */ +enum ref_operator { + /* NULL */ + RO_NULL, + /* array indexing */ + RO_ARRAY_INDEXING, + /* unary * */ + RO_UNARY_STAR, + /* -> */ + RO_ARROW, + /* implicit conversion */ + RO_IMPLICIT_CONVERSION, + /* ->* */ + RO_ARROW_STAR +}; + +/* Information about a statement tree. */ + +struct GTY(()) stmt_tree_s { + /* A stack of statement lists being collected. */ + vec *x_cur_stmt_list; + + /* In C++, Nonzero if we should treat statements as full + expressions. In particular, this variable is non-zero if at the + end of a statement we should destroy any temporaries created + during that statement. Similarly, if, at the end of a block, we + should destroy any local variables in this block. Normally, this + variable is nonzero, since those are the normal semantics of + C++. + + This flag has no effect in C. */ + int stmts_are_full_exprs_p; +}; + +typedef struct stmt_tree_s *stmt_tree; + +/* Global state pertinent to the current function. Some C dialects + extend this structure with additional fields. */ + +struct GTY(()) c_language_function { + /* While we are parsing the function, this contains information + about the statement-tree that we are building. */ + struct stmt_tree_s x_stmt_tree; + + /* Vector of locally defined typedefs, for + -Wunused-local-typedefs. 
*/ + vec *local_typedefs; +}; + +#define stmt_list_stack (current_stmt_tree ()->x_cur_stmt_list) + +/* When building a statement-tree, this is the current statement list + being collected. */ +#define cur_stmt_list (stmt_list_stack->last ()) + +#define building_stmt_list_p() (stmt_list_stack && !stmt_list_stack->is_empty()) + +/* Language-specific hooks. */ + +/* If non-NULL, this function is called after a precompile header file + is loaded. */ +extern void (*lang_post_pch_load) (void); + +extern void push_file_scope (void); +extern void pop_file_scope (void); +extern stmt_tree current_stmt_tree (void); +extern tree push_stmt_list (void); +extern tree pop_stmt_list (tree); +extern tree add_stmt (tree); +extern void push_cleanup (tree, tree, bool); + +extern tree build_modify_expr (location_t, tree, tree, enum tree_code, + location_t, tree, tree); +extern tree build_indirect_ref (location_t, tree, ref_operator); + +extern bool has_c_linkage (const_tree decl); +extern bool c_decl_implicit (const_tree); + +/* Switches common to the C front ends. */ + +/* Nonzero means don't output line number information. */ + +extern char flag_no_line_commands; + +/* Nonzero causes -E output not to be done, but directives such as + #define that have side effects are still obeyed. */ + +extern char flag_no_output; + +/* Nonzero means dump macros in some fashion; contains the 'D', 'M', + 'N' or 'U' of the command line switch. */ + +extern char flag_dump_macros; + +/* Nonzero means pass #include lines through to the output. */ + +extern char flag_dump_includes; + +/* Nonzero means process PCH files while preprocessing. */ + +extern bool flag_pch_preprocess; + +/* The file name to which we should write a precompiled header, or + NULL if no header will be written in this compile. */ + +extern const char *pch_file; + +/* Nonzero if an ISO standard was selected. It rejects macros in the + user's namespace. */ + +extern int flag_iso; + +/* C/ObjC language option variables. 
*/ + + +/* Nonzero means allow type mismatches in conditional expressions; + just make their values `void'. */ + +extern int flag_cond_mismatch; + +/* Nonzero means enable C89 Amendment 1 features. */ + +extern int flag_isoc94; + +/* Nonzero means use the ISO C99 (or later) dialect of C. */ + +extern int flag_isoc99; + +/* Nonzero means use the ISO C11 (or later) dialect of C. */ + +extern int flag_isoc11; + +/* Nonzero means use the ISO C2X dialect of C. */ + +extern int flag_isoc2x; + +/* Nonzero means that we have builtin functions, and main is an int. */ + +extern int flag_hosted; + +/* ObjC language option variables. */ + + +/* Tells the compiler that this is a special run. Do not perform any + compiling, instead we are to test some platform dependent features + and output a C header file with appropriate definitions. */ + +extern int print_struct_values; + +/* Tells the compiler what is the constant string class for ObjC. */ + +extern const char *constant_string_class_name; + + +/* C++ language option variables. */ + +/* The reference version of the ABI for -Wabi. */ + +extern int warn_abi_version; + +/* Return TRUE if one of {flag_abi_version,flag_abi_compat_version} is + less than N and the other is at least N. */ +#define abi_compat_version_crosses(N) \ + (abi_version_at_least(N) \ + != (flag_abi_compat_version == 0 \ + || flag_abi_compat_version >= (N))) + +/* Return TRUE if one of {flag_abi_version,warn_abi_version} is + less than N and the other is at least N, for use by -Wabi. */ +#define abi_version_crosses(N) \ + (abi_version_at_least(N) \ + != (warn_abi_version == 0 \ + || warn_abi_version >= (N))) + +/* The supported C++ dialects. */ + +enum cxx_dialect { + cxx_unset, + /* C++98 with TC1 */ + cxx98, + cxx03 = cxx98, + /* C++11 */ + cxx0x, + cxx11 = cxx0x, + /* C++14 */ + cxx14, + /* C++17 */ + cxx17, + /* C++20 */ + cxx20, + /* C++23 */ + cxx23 +}; + +/* The C++ dialect being used. C++98 is the default. 
*/ +extern enum cxx_dialect cxx_dialect; + +/* Maximum template instantiation depth. This limit is rather + arbitrary, but it exists to limit the time it takes to notice + excessively recursive template instantiations. */ + +extern int max_tinst_depth; + +/* Nonzero means that we should not issue warnings about problems that + occur when the code is executed, because the code being processed + is not expected to be executed. This is set during parsing. This + is used for cases like sizeof() and "0 ? a : b". This is a count, + not a bool, because unexecuted expressions can nest. */ + +extern int c_inhibit_evaluation_warnings; + +/* Whether lexing has been completed, so subsequent preprocessor + errors should use the compiler's input_location. */ + +extern bool done_lexing; + +/* C types are partitioned into three subsets: object, function, and + incomplete types. */ +#define C_TYPE_OBJECT_P(type) \ + (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type)) + +#define C_TYPE_INCOMPLETE_P(type) \ + (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type) == 0) + +#define C_TYPE_FUNCTION_P(type) \ + (TREE_CODE (type) == FUNCTION_TYPE) + +/* For convenience we define a single macro to identify the class of + object or incomplete types. */ +#define C_TYPE_OBJECT_OR_INCOMPLETE_P(type) \ + (!C_TYPE_FUNCTION_P (type)) + +/* Return true if TYPE is a vector type that should be subject to the GNU + vector extensions (as opposed to a vector type that is used only for + the purposes of defining target-specific built-in functions). */ + +inline bool +gnu_vector_type_p (const_tree type) +{ + return TREE_CODE (type) == VECTOR_TYPE && !TYPE_INDIVISIBLE_P (type); +} + +struct visibility_flags +{ + unsigned inpragma : 1; /* True when in #pragma GCC visibility. */ + unsigned inlines_hidden : 1; /* True when -finlineshidden in effect. */ +}; + +/* These enumerators are possible types of unsafe conversions. */ +enum conversion_safety { + /* The conversion is safe. 
*/ + SAFE_CONVERSION = 0, + /* Another type of conversion with problems. */ + UNSAFE_OTHER, + /* Conversion between signed and unsigned integers. */ + UNSAFE_SIGN, + /* Conversions that reduce the precision of reals including conversions + from reals to integers. */ + UNSAFE_REAL, + /* Conversions from complex to reals or integers, that discard imaginary + component. */ + UNSAFE_IMAGINARY +}; + +/* Global visibility options. */ +extern struct visibility_flags visibility_options; + +/* Attribute table common to the C front ends. */ +extern const struct attribute_spec c_common_attribute_table[]; +extern const struct attribute_spec c_common_format_attribute_table[]; + +/* Pointer to function to lazily generate the VAR_DECL for __FUNCTION__ etc. + ID is the identifier to use, NAME is the string. + TYPE_DEP indicates whether it depends on type of the function or not + (i.e. __PRETTY_FUNCTION__). */ + +extern tree (*make_fname_decl) (location_t, tree, int); + +/* In c-decl.cc and cp/tree.cc. FIXME. */ +extern void c_register_addr_space (const char *str, addr_space_t as); + +/* In c-common.cc. 
*/ +extern bool in_late_binary_op; +extern const char *c_addr_space_name (addr_space_t as); +extern tree identifier_global_value (tree); +extern tree identifier_global_tag (tree); +extern bool names_builtin_p (const char *); +extern tree c_linkage_bindings (tree); +extern void record_builtin_type (enum rid, const char *, tree); +extern tree build_void_list_node (void); +extern void start_fname_decls (void); +extern void finish_fname_decls (void); +extern const char *fname_as_string (int); +extern tree fname_decl (location_t, unsigned, tree); + +extern int check_user_alignment (const_tree, bool, bool); +extern bool check_function_arguments (location_t loc, const_tree, const_tree, + int, tree *, vec *); +extern void check_function_arguments_recurse (void (*) + (void *, tree, + unsigned HOST_WIDE_INT), + void *, tree, + unsigned HOST_WIDE_INT, + opt_code); +extern bool check_builtin_function_arguments (location_t, vec, + tree, tree, int, tree *); +extern void check_function_format (const_tree, tree, int, tree *, + vec *); +extern bool attribute_fallthrough_p (tree); +extern tree handle_format_attribute (tree *, tree, tree, int, bool *); +extern tree handle_format_arg_attribute (tree *, tree, tree, int, bool *); +extern bool c_common_handle_option (size_t, const char *, HOST_WIDE_INT, int, + location_t, + const struct cl_option_handlers *); +extern bool default_handle_c_option (size_t, const char *, int); +extern tree c_common_type_for_mode (machine_mode, int); +extern tree c_common_type_for_size (unsigned int, int); +extern tree c_common_fixed_point_type_for_size (unsigned int, unsigned int, + int, int); +extern tree c_common_unsigned_type (tree); +extern tree c_common_signed_type (tree); +extern tree c_common_signed_or_unsigned_type (int, tree); +extern void c_common_init_ts (void); +extern tree c_build_bitfield_integer_type (unsigned HOST_WIDE_INT, int); +extern enum conversion_safety unsafe_conversion_p (tree, tree, tree, bool); +extern bool 
decl_with_nonnull_addr_p (const_tree); +extern tree c_fully_fold (tree, bool, bool *, bool = false); +extern tree c_wrap_maybe_const (tree, bool); +extern tree c_common_truthvalue_conversion (location_t, tree); +extern void c_apply_type_quals_to_decl (int, tree); +extern tree c_sizeof_or_alignof_type (location_t, tree, bool, bool, int); +extern tree c_alignof_expr (location_t, tree); +/* Print an error message for invalid operands to arith operation CODE. + NOP_EXPR is used as a special case (see truthvalue_conversion). */ +extern void binary_op_error (rich_location *, enum tree_code, tree, tree); +extern tree fix_string_type (tree); +extern tree convert_and_check (location_t, tree, tree, bool = false); +extern bool c_determine_visibility (tree); +extern bool vector_types_compatible_elements_p (tree, tree); +extern void mark_valid_location_for_stdc_pragma (bool); +extern bool valid_location_for_stdc_pragma_p (void); +extern void set_float_const_decimal64 (void); +extern void clear_float_const_decimal64 (void); +extern bool float_const_decimal64_p (void); + +extern bool keyword_begins_type_specifier (enum rid); +extern bool keyword_is_storage_class_specifier (enum rid); +extern bool keyword_is_type_qualifier (enum rid); +extern bool keyword_is_decl_specifier (enum rid); +extern unsigned max_align_t_align (void); +extern bool cxx_fundamental_alignment_p (unsigned); +extern bool pointer_to_zero_sized_aggr_p (tree); +extern bool bool_promoted_to_int_p (tree); +extern tree fold_for_warn (tree); +extern tree c_common_get_narrower (tree, int *); +extern bool get_attribute_operand (tree, unsigned HOST_WIDE_INT *); +extern void c_common_finalize_early_debug (void); + +/* Used by convert_and_check; in front ends. */ +extern tree convert_init (tree, tree); + +#define c_sizeof(LOC, T) c_sizeof_or_alignof_type (LOC, T, true, false, 1) +#define c_alignof(LOC, T) c_sizeof_or_alignof_type (LOC, T, false, false, 1) + +/* Subroutine of build_binary_op, used for certain operations. 
*/ +extern tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise); + +/* Subroutine of build_binary_op, used for comparison operations. + See if the operands have both been converted from subword integer types + and, if so, perhaps change them both back to their original type. */ +extern tree shorten_compare (location_t, tree *, tree *, tree *, + enum tree_code *); + +extern tree pointer_int_sum (location_t, enum tree_code, tree, tree, + bool = true); + +/* Add qualifiers to a type, in the fashion for C. */ +extern tree c_build_qualified_type (tree, int, tree = NULL_TREE, size_t = 0); + +/* Build tree nodes and builtin functions common to both C and C++ language + frontends. */ +extern void c_common_nodes_and_builtins (void); + +extern void disable_builtin_function (const char *); + +extern void set_compound_literal_name (tree decl); + +extern tree build_va_arg (location_t, tree, tree); + +extern const unsigned int c_family_lang_mask; +extern unsigned int c_common_option_lang_mask (void); +extern void c_common_diagnostics_set_defaults (diagnostic_context *); +extern bool c_common_complain_wrong_lang_p (const struct cl_option *); +extern void c_common_init_options_struct (struct gcc_options *); +extern void c_common_init_options (unsigned int, struct cl_decoded_option *); +extern bool c_common_post_options (const char **); +extern bool c_common_init (void); +extern void c_common_finish (void); +extern void c_common_parse_file (void); +extern FILE *get_dump_info (int, dump_flags_t *); +extern alias_set_type c_common_get_alias_set (tree); +extern void c_register_builtin_type (tree, const char*); +extern bool c_promoting_integer_type_p (const_tree); +extern bool self_promoting_args_p (const_tree); +extern tree strip_pointer_operator (tree); +extern tree strip_pointer_or_array_types (tree); +extern HOST_WIDE_INT c_common_to_target_charset (HOST_WIDE_INT); + +/* This is the basic parsing function. 
*/ +extern void c_parse_file (void); + +extern void c_parse_final_cleanups (void); + +/* These macros provide convenient access to the various _STMT nodes. */ + +/* Nonzero if a given STATEMENT_LIST represents the outermost binding + if a statement expression. */ +#define STATEMENT_LIST_STMT_EXPR(NODE) \ + TREE_LANG_FLAG_1 (STATEMENT_LIST_CHECK (NODE)) + +/* Nonzero if a label has been added to the statement list. */ +#define STATEMENT_LIST_HAS_LABEL(NODE) \ + TREE_LANG_FLAG_3 (STATEMENT_LIST_CHECK (NODE)) + +/* C_MAYBE_CONST_EXPR accessors. */ +#define C_MAYBE_CONST_EXPR_PRE(NODE) \ + TREE_OPERAND (C_MAYBE_CONST_EXPR_CHECK (NODE), 0) +#define C_MAYBE_CONST_EXPR_EXPR(NODE) \ + TREE_OPERAND (C_MAYBE_CONST_EXPR_CHECK (NODE), 1) +#define C_MAYBE_CONST_EXPR_INT_OPERANDS(NODE) \ + TREE_LANG_FLAG_0 (C_MAYBE_CONST_EXPR_CHECK (NODE)) +#define C_MAYBE_CONST_EXPR_NON_CONST(NODE) \ + TREE_LANG_FLAG_1 (C_MAYBE_CONST_EXPR_CHECK (NODE)) +#define EXPR_INT_CONST_OPERANDS(EXPR) \ + (INTEGRAL_TYPE_P (TREE_TYPE (EXPR)) \ + && (TREE_CODE (EXPR) == INTEGER_CST \ + || (TREE_CODE (EXPR) == C_MAYBE_CONST_EXPR \ + && C_MAYBE_CONST_EXPR_INT_OPERANDS (EXPR)))) + +/* In a FIELD_DECL, nonzero if the decl was originally a bitfield. */ +#define DECL_C_BIT_FIELD(NODE) \ + (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) == 1) +#define SET_DECL_C_BIT_FIELD(NODE) \ + (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 1) +#define CLEAR_DECL_C_BIT_FIELD(NODE) \ + (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 0) + +/* True if the decl was an unnamed bitfield. */ +#define DECL_UNNAMED_BIT_FIELD(NODE) \ + (DECL_C_BIT_FIELD (NODE) && !DECL_NAME (NODE)) + +extern tree do_case (location_t, tree, tree); +extern tree build_stmt (location_t, enum tree_code, ...); +extern tree build_real_imag_expr (location_t, enum tree_code, tree); + +/* These functions must be defined by each front-end which implements + a variant of the C language. They are used in c-common.cc. 
*/ + +extern tree build_unary_op (location_t, enum tree_code, tree, bool); +extern tree build_binary_op (location_t, enum tree_code, tree, tree, bool); +extern tree perform_integral_promotions (tree); + +/* These functions must be defined by each front-end which implements + a variant of the C language. They are used by port files. */ + +extern tree default_conversion (tree); + +/* Given two integer or real types, return the type for their sum. + Given two compatible ANSI C types, returns the merged type. */ + +extern tree common_type (tree, tree); + +extern tree decl_constant_value (tree); + +/* Handle increment and decrement of boolean types. */ +extern tree boolean_increment (enum tree_code, tree); + +extern int case_compare (splay_tree_key, splay_tree_key); + +extern tree c_add_case_label (location_t, splay_tree, tree, tree, tree); +extern bool c_switch_covers_all_cases_p (splay_tree, tree); +extern bool c_block_may_fallthru (const_tree); + +extern tree build_function_call (location_t, tree, tree); + +extern tree build_function_call_vec (location_t, vec, tree, + vec *, vec *, + tree = NULL_TREE); + +extern tree resolve_overloaded_builtin (location_t, tree, vec *); + +extern tree finish_label_address_expr (tree, location_t); + +/* Same function prototype, but the C and C++ front ends have + different implementations. Used in c-common.cc. 
*/ +extern tree lookup_label (tree); +extern tree lookup_name (tree); +extern bool lvalue_p (const_tree); + +extern bool vector_targets_convertible_p (const_tree t1, const_tree t2); +extern bool vector_types_convertible_p (const_tree t1, const_tree t2, bool emit_lax_note); +extern tree c_build_vec_perm_expr (location_t, tree, tree, tree, bool = true); +extern tree c_build_shufflevector (location_t, tree, tree, + const vec &, bool = true); +extern tree c_build_vec_convert (location_t, tree, location_t, tree, bool = true); + +extern void init_c_lex (void); + +extern void c_cpp_builtins (cpp_reader *); +extern void c_cpp_builtins_optimize_pragma (cpp_reader *, tree, tree); +extern bool c_cpp_diagnostic (cpp_reader *, enum cpp_diagnostic_level, + enum cpp_warning_reason, rich_location *, + const char *, va_list *) + ATTRIBUTE_GCC_DIAG(5,0); +extern int c_common_has_attribute (cpp_reader *, bool); +extern int c_common_has_builtin (cpp_reader *); + +extern bool parse_optimize_options (tree, bool); + +/* Positive if an implicit `extern "C"' scope has just been entered; + negative if such a scope has just been exited. */ +extern GTY(()) int pending_lang_change; + +/* Information recorded about each file examined during compilation. */ + +struct c_fileinfo +{ + int time; /* Time spent in the file. */ + + /* Flags used only by C++. + INTERFACE_ONLY nonzero means that we are in an "interface" section + of the compiler. INTERFACE_UNKNOWN nonzero means we cannot trust + the value of INTERFACE_ONLY. If INTERFACE_UNKNOWN is zero and + INTERFACE_ONLY is zero, it means that we are responsible for + exporting definitions that others might need. 
*/ + short interface_only; + short interface_unknown; +}; + +struct c_fileinfo *get_fileinfo (const char *); +extern void dump_time_statistics (void); + +extern bool c_dump_tree (void *, tree); + +extern void verify_sequence_points (tree); + +extern tree fold_offsetof (tree, tree = size_type_node, + tree_code ctx = ERROR_MARK); + +extern int complete_array_type (tree *, tree, bool); +extern void complete_flexible_array_elts (tree); + +extern tree builtin_type_for_size (int, bool); + +extern void c_common_mark_addressable_vec (tree); + +extern void set_underlying_type (tree); +extern bool user_facing_original_type_p (const_tree); +extern void record_types_used_by_current_var_decl (tree); +extern vec *make_tree_vector (void); +extern void release_tree_vector (vec *); +extern vec *make_tree_vector_single (tree); +extern vec *make_tree_vector_from_list (tree); +extern vec *make_tree_vector_from_ctor (tree); +extern vec *make_tree_vector_copy (const vec *); + +/* Used for communication between c_common_type_for_mode and + c_register_builtin_type. */ +extern GTY(()) tree registered_builtin_types; + +/* Read SOURCE_DATE_EPOCH from environment to have a deterministic + timestamp to replace embedded current dates to get reproducible + results. Returns -1 if SOURCE_DATE_EPOCH is not defined. */ +extern time_t cb_get_source_date_epoch (cpp_reader *pfile); + +/* The value (as a unix timestamp) corresponds to date + "Dec 31 9999 23:59:59 UTC", which is the latest date that __DATE__ and + __TIME__ can store. */ +#define MAX_SOURCE_DATE_EPOCH HOST_WIDE_INT_C (253402300799) + +/* Callback for libcpp for offering spelling suggestions for misspelled + directives. */ +extern const char *cb_get_suggestion (cpp_reader *, const char *, + const char *const *); + +extern GTY(()) string_concat_db *g_string_concat_db; + +class substring_loc; +extern const char *c_get_substring_location (const substring_loc &substr_loc, + location_t *out_loc); + +/* In c-gimplify.cc. 
*/ +typedef struct bc_state +{ + tree bc_label[2]; +} bc_state_t; +extern void save_bc_state (bc_state_t *); +extern void restore_bc_state (bc_state_t *); +extern tree c_genericize_control_stmt (tree *, int *, void *, + walk_tree_fn, walk_tree_lh); +extern void c_genericize (tree); +extern int c_gimplify_expr (tree *, gimple_seq *, gimple_seq *); +extern tree c_build_bind_expr (location_t, tree, tree); + +/* In c-lex.cc. */ +extern enum cpp_ttype +conflict_marker_get_final_tok_kind (enum cpp_ttype tok1_kind); + +/* In c-pch.cc */ +extern void pch_init (void); +extern void pch_cpp_save_state (void); +extern int c_common_valid_pch (cpp_reader *pfile, const char *name, int fd); +extern void c_common_read_pch (cpp_reader *pfile, const char *name, int fd, + const char *orig); +extern void c_common_write_pch (void); +extern void c_common_no_more_pch (void); +extern void c_common_pch_pragma (cpp_reader *pfile, const char *); + +/* In *-checksum.c */ +extern const unsigned char executable_checksum[16]; + +/* In c-cppbuiltin.cc */ +extern void builtin_define_std (const char *macro); +extern void builtin_define_with_value (const char *, const char *, int); +extern void builtin_define_with_int_value (const char *, HOST_WIDE_INT); +extern void builtin_define_type_sizeof (const char *, tree); +extern void c_stddef_cpp_builtins (void); +extern void fe_file_change (const line_map_ordinary *); +extern void c_parse_error (const char *, enum cpp_ttype, tree, unsigned char, + rich_location *richloc); + +/* In c-ppoutput.cc */ +extern void init_pp_output (FILE *); +extern void preprocess_file (cpp_reader *); +extern void pp_file_change (const line_map_ordinary *); +extern void pp_dir_change (cpp_reader *, const char *); +extern bool check_missing_format_attribute (tree, tree); + +/* In c-omp.cc */ +typedef wide_int_bitmask omp_clause_mask; + +#define OMP_CLAUSE_MASK_1 omp_clause_mask (1) + +enum c_omp_clause_split +{ + C_OMP_CLAUSE_SPLIT_TARGET = 0, + C_OMP_CLAUSE_SPLIT_TEAMS, + 
C_OMP_CLAUSE_SPLIT_DISTRIBUTE, + C_OMP_CLAUSE_SPLIT_PARALLEL, + C_OMP_CLAUSE_SPLIT_FOR, + C_OMP_CLAUSE_SPLIT_SIMD, + C_OMP_CLAUSE_SPLIT_COUNT, + C_OMP_CLAUSE_SPLIT_SECTIONS = C_OMP_CLAUSE_SPLIT_FOR, + C_OMP_CLAUSE_SPLIT_TASKLOOP = C_OMP_CLAUSE_SPLIT_FOR, + C_OMP_CLAUSE_SPLIT_LOOP = C_OMP_CLAUSE_SPLIT_FOR, + C_OMP_CLAUSE_SPLIT_MASKED = C_OMP_CLAUSE_SPLIT_DISTRIBUTE +}; + +enum c_omp_region_type +{ + C_ORT_OMP = 1 << 0, + C_ORT_ACC = 1 << 1, + C_ORT_DECLARE_SIMD = 1 << 2, + C_ORT_TARGET = 1 << 3, + C_ORT_OMP_DECLARE_SIMD = C_ORT_OMP | C_ORT_DECLARE_SIMD, + C_ORT_OMP_TARGET = C_ORT_OMP | C_ORT_TARGET +}; + +extern tree c_finish_omp_master (location_t, tree); +extern tree c_finish_omp_masked (location_t, tree, tree); +extern tree c_finish_omp_taskgroup (location_t, tree, tree); +extern tree c_finish_omp_critical (location_t, tree, tree, tree); +extern tree c_finish_omp_ordered (location_t, tree, tree); +extern void c_finish_omp_barrier (location_t); +extern tree c_finish_omp_atomic (location_t, enum tree_code, enum tree_code, + tree, tree, tree, tree, tree, tree, bool, + enum omp_memory_order, bool, bool = false); +extern bool c_omp_depend_t_p (tree); +extern void c_finish_omp_depobj (location_t, tree, enum omp_clause_depend_kind, + tree); +extern void c_finish_omp_flush (location_t, int); +extern void c_finish_omp_taskwait (location_t); +extern void c_finish_omp_taskyield (location_t); +extern tree c_finish_omp_for (location_t, enum tree_code, tree, tree, tree, + tree, tree, tree, tree, bool); +extern bool c_omp_check_loop_iv (tree, tree, walk_tree_lh); +extern bool c_omp_check_loop_iv_exprs (location_t, enum tree_code, tree, int, + tree, tree, tree, walk_tree_lh); +extern tree c_finish_oacc_wait (location_t, tree, tree); +extern tree c_oacc_split_loop_clauses (tree, tree *, bool); +extern void c_omp_split_clauses (location_t, enum tree_code, omp_clause_mask, + tree, tree *); +extern tree c_omp_declare_simd_clauses_to_numbers (tree, tree); +extern void 
c_omp_declare_simd_clauses_to_decls (tree, tree); +extern bool c_omp_predefined_variable (tree); +extern enum omp_clause_default_kind c_omp_predetermined_sharing (tree); +extern enum omp_clause_defaultmap_kind c_omp_predetermined_mapping (tree); +extern tree c_omp_check_context_selector (location_t, tree); +extern void c_omp_mark_declare_variant (location_t, tree, tree); +extern void c_omp_adjust_map_clauses (tree, bool); + +enum c_omp_directive_kind { + C_OMP_DIR_STANDALONE, + C_OMP_DIR_CONSTRUCT, + C_OMP_DIR_DECLARATIVE, + C_OMP_DIR_UTILITY, + C_OMP_DIR_INFORMATIONAL +}; + +struct c_omp_directive { + const char *first, *second, *third; + unsigned int id; + enum c_omp_directive_kind kind; + bool simd; +}; + +extern const struct c_omp_directive *c_omp_categorize_directive (const char *, + const char *, + const char *); + +/* Return next tree in the chain for chain_next walking of tree nodes. */ +static inline tree +c_tree_chain_next (tree t) +{ + /* TREE_CHAIN of a type is TYPE_STUB_DECL, which is different + kind of object, never a long chain of nodes. Prefer + TYPE_NEXT_VARIANT for types. */ + if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_TYPE_COMMON)) + return TYPE_NEXT_VARIANT (t); + /* Otherwise, if there is TREE_CHAIN, return it. */ + if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_COMMON)) + return TREE_CHAIN (t); + return NULL; +} + +/* Mask used by tm_stmt_attr. */ +#define TM_STMT_ATTR_OUTER 2 +#define TM_STMT_ATTR_ATOMIC 4 +#define TM_STMT_ATTR_RELAXED 8 + +/* Mask used by tm_attr_to_mask and tm_mask_to_attr. Note that these + are ordered specifically such that more restrictive attributes are + at lower bit positions. This fact is known by the C++ tm attribute + inheritance code such that least bit extraction (mask & -mask) results + in the most restrictive attribute. 
*/ +#define TM_ATTR_SAFE 1 +#define TM_ATTR_CALLABLE 2 +#define TM_ATTR_PURE 4 +#define TM_ATTR_IRREVOCABLE 8 +#define TM_ATTR_MAY_CANCEL_OUTER 16 + +/* A suffix-identifier value doublet that represents user-defined literals + for C++-0x. */ +enum overflow_type { + OT_UNDERFLOW = -1, + OT_NONE, + OT_OVERFLOW +}; + +struct GTY(()) tree_userdef_literal { + struct tree_base base; + tree suffix_id; + tree value; + tree num_string; + enum overflow_type overflow; +}; + +#define USERDEF_LITERAL_SUFFIX_ID(NODE) \ + (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->suffix_id) + +#define USERDEF_LITERAL_VALUE(NODE) \ + (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->value) + +#define USERDEF_LITERAL_OVERFLOW(NODE) \ + (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->overflow) + +#define USERDEF_LITERAL_NUM_STRING(NODE) \ + (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->num_string) + +#define USERDEF_LITERAL_TYPE(NODE) \ + (TREE_TYPE (USERDEF_LITERAL_VALUE (NODE))) + +extern tree build_userdef_literal (tree suffix_id, tree value, + enum overflow_type overflow, + tree num_string); + + +/* WHILE_STMT accessors. These give access to the condition of the + while statement and the body of the while statement, respectively. */ +#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) +#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) + +/* DO_STMT accessors. These give access to the condition of the do + statement and the body of the do statement, respectively. */ +#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) +#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) + +/* FOR_STMT accessors. These give access to the init statement, + condition, update expression, and body of the for statement, + respectively. 
*/ +#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) +#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) +#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) +#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) +#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4) + +#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0) +#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1) +#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) +#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3) +/* True if there are case labels for all possible values of switch cond, either + because there is a default: case label or because the case label ranges cover + all values. */ +#define SWITCH_STMT_ALL_CASES_P(NODE) \ + TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE)) +/* True if the body of a switch stmt contains no BREAK_STMTs. */ +#define SWITCH_STMT_NO_BREAK_P(NODE) \ + TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE)) + + +/* Nonzero if NODE is the target for genericization of 'break' stmts. */ +#define LABEL_DECL_BREAK(NODE) \ + DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE)) + +/* Nonzero if NODE is the target for genericization of 'continue' stmts. */ +#define LABEL_DECL_CONTINUE(NODE) \ + DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE)) + +extern bool convert_vector_to_array_for_subscript (location_t, tree *, tree); + +/* Possibe cases of scalar_to_vector conversion. */ +enum stv_conv { + stv_error, /* Error occurred. */ + stv_nothing, /* Nothing happened. */ + stv_firstarg, /* First argument must be expanded. */ + stv_secondarg /* Second argument must be expanded. 
*/ +}; + +extern enum stv_conv scalar_to_vector (location_t loc, enum tree_code code, + tree op0, tree op1, bool); + +extern tree find_inv_trees (tree *, int *, void *); +extern tree replace_inv_trees (tree *, int *, void *); + +extern bool reject_gcc_builtin (const_tree, location_t = UNKNOWN_LOCATION); +extern bool valid_array_size_p (location_t, const_tree, tree, bool = true); +extern void invalid_array_size_error (location_t, cst_size_error, + const_tree, const_tree); + +/* In c-warn.cc. */ +extern void constant_expression_warning (tree); +extern void constant_expression_error (tree); +extern void overflow_warning (location_t, tree, tree = NULL_TREE); +extern void warn_logical_operator (location_t, enum tree_code, tree, + enum tree_code, tree, enum tree_code, tree); +extern void warn_tautological_cmp (const op_location_t &, enum tree_code, + tree, tree); +extern void warn_logical_not_parentheses (location_t, enum tree_code, tree, + tree); +extern bool warn_if_unused_value (const_tree, location_t, bool = false); +extern bool strict_aliasing_warning (location_t, tree, tree); +extern void sizeof_pointer_memaccess_warning (location_t *, tree, + vec *, tree *, + bool (*) (tree, tree)); +extern void check_main_parameter_types (tree decl); +extern void warnings_for_convert_and_check (location_t, tree, tree, tree); +extern void c_do_switch_warnings (splay_tree, location_t, tree, tree, bool); +extern void warn_for_omitted_condop (location_t, tree); +extern bool warn_for_restrict (unsigned, tree *, unsigned); +extern void warn_for_address_or_pointer_of_packed_member (tree, tree); +extern void warn_parm_array_mismatch (location_t, tree, tree); +extern void maybe_warn_sizeof_array_div (location_t, tree, tree, tree, tree); +extern void do_warn_array_compare (location_t, tree_code, tree, tree); + +/* Places where an lvalue, or modifiable lvalue, may be required. + Used to select diagnostic messages in lvalue_error and + readonly_error. 
*/ +enum lvalue_use { + lv_assign, + lv_increment, + lv_decrement, + lv_addressof, + lv_asm +}; + +extern void lvalue_error (location_t, enum lvalue_use); +extern void invalid_indirection_error (location_t, tree, ref_operator); +extern void readonly_error (location_t, tree, enum lvalue_use); +extern void warn_array_subscript_with_type_char (location_t, tree); +extern void warn_about_parentheses (location_t, + enum tree_code, + enum tree_code, tree, + enum tree_code, tree); +extern void warn_for_unused_label (tree label); +extern void warn_for_div_by_zero (location_t, tree divisor); +extern void warn_for_memset (location_t, tree, tree, int); +extern void warn_for_sign_compare (location_t, + tree orig_op0, tree orig_op1, + tree op0, tree op1, + tree result_type, + enum tree_code resultcode); +extern void do_warn_double_promotion (tree, tree, tree, const char *, + location_t); +extern void do_warn_unused_parameter (tree); +extern void record_locally_defined_typedef (tree); +extern void maybe_record_typedef_use (tree); +extern void maybe_warn_unused_local_typedefs (void); +extern void maybe_warn_bool_compare (location_t, enum tree_code, tree, tree); +extern bool maybe_warn_shift_overflow (location_t, tree, tree); +extern void warn_duplicated_cond_add_or_warn (location_t, tree, vec **); +extern bool diagnose_mismatched_attributes (tree, tree); +extern tree do_warn_duplicated_branches_r (tree *, int *, void *); +extern void warn_for_multistatement_macros (location_t, location_t, + location_t, enum rid); + +/* In c-attribs.cc. 
*/ +extern bool attribute_takes_identifier_p (const_tree); +extern tree handle_deprecated_attribute (tree *, tree, tree, int, bool *); +extern tree handle_unused_attribute (tree *, tree, tree, int, bool *); +extern tree handle_fallthrough_attribute (tree *, tree, tree, int, bool *); +extern int parse_tm_stmt_attr (tree, int); +extern int tm_attr_to_mask (tree); +extern tree tm_mask_to_attr (int); +extern tree find_tm_attribute (tree); +extern const struct attribute_spec::exclusions attr_cold_hot_exclusions[]; +extern const struct attribute_spec::exclusions attr_noreturn_exclusions[]; +extern tree handle_noreturn_attribute (tree *, tree, tree, int, bool *); +extern bool has_attribute (location_t, tree, tree, tree (*)(tree)); +extern tree build_attr_access_from_parms (tree, bool); + +/* In c-format.cc. */ +extern bool valid_format_string_type_p (tree); + +/* A bitmap of flags to positional_argument. */ +enum posargflags { + /* Consider positional attribute argument value zero valid. */ + POSARG_ZERO = 1, + /* Consider positional attribute argument value valid if it refers + to the ellipsis (i.e., beyond the last typed argument). */ + POSARG_ELLIPSIS = 2 +}; + +extern tree positional_argument (const_tree, const_tree, tree, tree_code, + int = 0, int = posargflags ()); + +extern enum flt_eval_method +excess_precision_mode_join (enum flt_eval_method, enum flt_eval_method); + +extern int c_flt_eval_method (bool ts18661_p); +extern void add_no_sanitize_value (tree node, unsigned int flags); + +extern void maybe_add_include_fixit (rich_location *, const char *, bool); +extern void maybe_suggest_missing_token_insertion (rich_location *richloc, + enum cpp_ttype token_type, + location_t prev_token_loc); +extern tree braced_lists_to_strings (tree, tree); + +#if CHECKING_P +namespace selftest { + /* Declarations for specific families of tests within c-family, + by source file, in alphabetical order. 
*/ + extern void c_diagnostic_cc_tests (void); + extern void c_format_cc_tests (void); + extern void c_indentation_cc_tests (void); + extern void c_opt_problem_cc_tests (void); + extern void c_pretty_print_cc_tests (void); + extern void c_spellcheck_cc_tests (void); + + /* The entrypoint for running all of the above tests. */ + extern void c_family_tests (void); +} // namespace selftest +#endif /* #if CHECKING_P */ + +#endif /* ! GCC_C_COMMON_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-objc.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-objc.h new file mode 100644 index 0000000..da0ab3c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-objc.h @@ -0,0 +1,183 @@ +/* Definitions of Objective-C front-end entry points used for C and C++. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_C_COMMON_OBJC_H +#define GCC_C_COMMON_OBJC_H + +/* ObjC ivar visibility types. */ +enum GTY(()) objc_ivar_visibility_kind { + OBJC_IVAR_VIS_PROTECTED = 0, + OBJC_IVAR_VIS_PUBLIC = 1, + OBJC_IVAR_VIS_PRIVATE = 2, + OBJC_IVAR_VIS_PACKAGE = 3 +}; + +/* ObjC property attribute kinds. + These have two fields; a unique value (that identifies which attribute) + and a group key that indicates membership of an exclusion group. 
+ Only one member may be present from an exclusion group in a given attribute + list. + getters and setters have additional rules, since they are excluded from + non-overlapping group sets. */ + +enum objc_property_attribute_group +{ + OBJC_PROPATTR_GROUP_UNKNOWN = 0, + OBJC_PROPATTR_GROUP_GETTER, + OBJC_PROPATTR_GROUP_SETTER, + OBJC_PROPATTR_GROUP_READWRITE, + OBJC_PROPATTR_GROUP_ASSIGN, + OBJC_PROPATTR_GROUP_ATOMIC, + OBJC_PROPATTR_GROUP_NULLABLE, + OBJC_PROPATTR_GROUP_CLASS, + OBJC_PROPATTR_GROUP_MAX +}; + +enum objc_property_attribute_kind +{ + OBJC_PROPERTY_ATTR_UNKNOWN = 0|OBJC_PROPATTR_GROUP_UNKNOWN, + OBJC_PROPERTY_ATTR_GETTER = ( 1 << 8)|OBJC_PROPATTR_GROUP_GETTER, + OBJC_PROPERTY_ATTR_SETTER = ( 2 << 8)|OBJC_PROPATTR_GROUP_SETTER, + OBJC_PROPERTY_ATTR_READONLY = ( 3 << 8)|OBJC_PROPATTR_GROUP_READWRITE, + OBJC_PROPERTY_ATTR_READWRITE = ( 4 << 8)|OBJC_PROPATTR_GROUP_READWRITE, + OBJC_PROPERTY_ATTR_ASSIGN = ( 5 << 8)|OBJC_PROPATTR_GROUP_ASSIGN, + OBJC_PROPERTY_ATTR_RETAIN = ( 6 << 8)|OBJC_PROPATTR_GROUP_ASSIGN, + OBJC_PROPERTY_ATTR_COPY = ( 7 << 8)|OBJC_PROPATTR_GROUP_ASSIGN, + OBJC_PROPERTY_ATTR_ATOMIC = ( 8 << 8)|OBJC_PROPATTR_GROUP_ATOMIC, + OBJC_PROPERTY_ATTR_NONATOMIC = ( 9 << 8)|OBJC_PROPATTR_GROUP_ATOMIC, + OBJC_PROPERTY_ATTR_NULL_UNSPECIFIED = (12 << 8)|OBJC_PROPATTR_GROUP_NULLABLE, + OBJC_PROPERTY_ATTR_NULLABLE = (13 << 8)|OBJC_PROPATTR_GROUP_NULLABLE, + OBJC_PROPERTY_ATTR_NONNULL = (14 << 8)|OBJC_PROPATTR_GROUP_NULLABLE, + OBJC_PROPERTY_ATTR_NULL_RESETTABLE = (15 << 8)|OBJC_PROPATTR_GROUP_NULLABLE, + OBJC_PROPERTY_ATTR_CLASS = (16 << 8)|OBJC_PROPATTR_GROUP_CLASS, + OBJC_PROPERTY_ATTR_MAX = (255 << 8|OBJC_PROPATTR_GROUP_MAX) +}; + +#define OBJC_PROPATTR_GROUP_MASK 0x0f + +/* To contain parsed, but unverified, information about a single property + attribute. 
*/ +struct property_attribute_info +{ + property_attribute_info () = default; + property_attribute_info (tree name, location_t loc, + enum objc_property_attribute_kind k) + : name (name), ident (NULL_TREE), prop_loc (loc), prop_kind (k), + parse_error (false) {} + + enum objc_property_attribute_group group () + { + return (enum objc_property_attribute_group) + ((unsigned)prop_kind & OBJC_PROPATTR_GROUP_MASK); + } + + tree name; /* Name of the attribute. */ + tree ident; /* For getter/setter cases, the method/selector name. */ + location_t prop_loc; /* Extended location covering the parsed attr. */ + enum objc_property_attribute_kind prop_kind : 16; + unsigned parse_error : 1; /* The C/C++ parser saw an error in this attr. */ +}; + +extern enum objc_property_attribute_kind objc_prop_attr_kind_for_rid (enum rid); + +/* Objective-C / Objective-C++ entry points. */ + +/* The following ObjC/ObjC++ functions are called by the C and/or C++ + front-ends; they all must have corresponding stubs in stub-objc.cc. 
*/ +extern void objc_write_global_declarations (void); +extern tree objc_is_class_name (tree); +extern tree objc_is_object_ptr (tree); +extern void objc_check_decl (tree); +extern void objc_check_global_decl (tree); +extern tree objc_common_type (tree, tree); +extern bool objc_compare_types (tree, tree, int, tree); +extern bool objc_have_common_type (tree, tree, int, tree); +extern bool objc_diagnose_private_ivar (tree); +extern void objc_volatilize_decl (tree); +extern tree objc_rewrite_function_call (tree, tree); +extern tree objc_message_selector (void); +extern tree objc_lookup_ivar (tree, tree); +extern void objc_clear_super_receiver (void); +extern int objc_is_public (tree, tree); +extern tree objc_is_id (tree); +extern void objc_declare_alias (tree, tree); +extern void objc_declare_class (tree); +extern void objc_declare_protocol (tree, tree); +extern tree objc_build_message_expr (tree, tree); +extern tree objc_finish_message_expr (tree, tree, tree, tree*); +extern tree objc_build_selector_expr (location_t, tree); +extern tree objc_build_protocol_expr (tree); +extern tree objc_build_encode_expr (tree); +extern tree objc_build_string_object (tree); +extern tree objc_get_protocol_qualified_type (tree, tree); +extern tree objc_get_class_reference (tree); +extern tree objc_get_class_ivars (tree); +extern bool objc_detect_field_duplicates (bool); +extern void objc_start_class_interface (tree, location_t, tree, tree, tree); +extern void objc_start_category_interface (tree, tree, tree, tree); +extern void objc_start_protocol (tree, tree, tree); +extern void objc_continue_interface (void); +extern void objc_finish_interface (void); +extern void objc_start_class_implementation (tree, tree); +extern void objc_start_category_implementation (tree, tree); +extern void objc_continue_implementation (void); +extern void objc_finish_implementation (void); +extern void objc_set_visibility (objc_ivar_visibility_kind); +extern tree objc_build_method_signature (bool, tree, tree, 
tree, bool); +extern void objc_add_method_declaration (bool, tree, tree); +extern bool objc_start_method_definition (bool, tree, tree, tree); +extern void objc_finish_method_definition (tree); +extern void objc_add_instance_variable (tree); +extern tree objc_build_keyword_decl (tree, tree, tree, tree); +extern tree objc_build_throw_stmt (location_t, tree); +extern void objc_begin_try_stmt (location_t, tree); +extern tree objc_finish_try_stmt (void); +extern void objc_begin_catch_clause (tree); +extern void objc_finish_catch_clause (void); +extern void objc_build_finally_clause (location_t, tree); +extern tree objc_build_synchronized (location_t, tree, tree); +extern int objc_static_init_needed_p (void); +extern tree objc_generate_static_init_call (tree); +extern tree objc_generate_write_barrier (tree, enum tree_code, tree); +extern void objc_set_method_opt (bool); +extern void objc_finish_foreach_loop (location_t, tree, tree, tree, tree, tree); +extern bool objc_method_decl (enum tree_code); +extern void objc_add_property_declaration (location_t, tree, + vec&); +extern tree objc_maybe_build_component_ref (tree, tree); +extern tree objc_build_class_component_ref (tree, tree); +extern tree objc_maybe_build_modify_expr (tree, tree); +extern tree objc_build_incr_expr_for_property_ref (location_t, enum tree_code, + tree, tree); +extern void objc_add_synthesize_declaration (location_t, tree); +extern void objc_add_dynamic_declaration (location_t, tree); +extern const char * objc_maybe_printable_name (tree, int); +extern bool objc_is_property_ref (tree); +extern bool objc_non_constant_expr_p (tree); +extern bool objc_string_ref_type_p (tree); +extern void objc_check_format_arg (tree, tree); +extern void objc_finish_function (void); +extern void objc_maybe_warn_exceptions (location_t); + +/* The following are provided by the C and C++ front-ends, and called by + ObjC/ObjC++. 
*/ +extern void *objc_get_current_scope (void); +extern void objc_mark_locals_volatile (void *); + +#endif /* ! GCC_C_COMMON_OBJC_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-pragma.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-pragma.h new file mode 100644 index 0000000..54864c2 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-pragma.h @@ -0,0 +1,270 @@ +/* Pragma related interfaces. + Copyright (C) 1995-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_C_PRAGMA_H +#define GCC_C_PRAGMA_H + +#include "cpplib.h" /* For enum cpp_ttype. */ + +/* Pragma identifiers built in to the front end parsers. Identifiers + for ancillary handlers will follow these. */ +enum pragma_kind { + PRAGMA_NONE = 0, + + PRAGMA_OACC_ATOMIC, + PRAGMA_OACC_CACHE, + PRAGMA_OACC_DATA, + PRAGMA_OACC_DECLARE, + PRAGMA_OACC_ENTER_DATA, + PRAGMA_OACC_EXIT_DATA, + PRAGMA_OACC_HOST_DATA, + PRAGMA_OACC_KERNELS, + PRAGMA_OACC_LOOP, + PRAGMA_OACC_PARALLEL, + PRAGMA_OACC_ROUTINE, + PRAGMA_OACC_SERIAL, + PRAGMA_OACC_UPDATE, + PRAGMA_OACC_WAIT, + + /* PRAGMA_OMP__START_ should be equal to the first PRAGMA_OMP_* code. 
*/ + PRAGMA_OMP_ALLOCATE, + PRAGMA_OMP__START_ = PRAGMA_OMP_ALLOCATE, + PRAGMA_OMP_ATOMIC, + PRAGMA_OMP_BARRIER, + PRAGMA_OMP_CANCEL, + PRAGMA_OMP_CANCELLATION_POINT, + PRAGMA_OMP_CRITICAL, + PRAGMA_OMP_DECLARE, + PRAGMA_OMP_DEPOBJ, + PRAGMA_OMP_DISTRIBUTE, + PRAGMA_OMP_ERROR, + PRAGMA_OMP_END_DECLARE_TARGET, + PRAGMA_OMP_FLUSH, + PRAGMA_OMP_FOR, + PRAGMA_OMP_LOOP, + PRAGMA_OMP_NOTHING, + PRAGMA_OMP_MASKED, + PRAGMA_OMP_MASTER, + PRAGMA_OMP_ORDERED, + PRAGMA_OMP_PARALLEL, + PRAGMA_OMP_REQUIRES, + PRAGMA_OMP_SCAN, + PRAGMA_OMP_SCOPE, + PRAGMA_OMP_SECTION, + PRAGMA_OMP_SECTIONS, + PRAGMA_OMP_SIMD, + PRAGMA_OMP_SINGLE, + PRAGMA_OMP_TARGET, + PRAGMA_OMP_TASK, + PRAGMA_OMP_TASKGROUP, + PRAGMA_OMP_TASKLOOP, + PRAGMA_OMP_TASKWAIT, + PRAGMA_OMP_TASKYIELD, + PRAGMA_OMP_THREADPRIVATE, + PRAGMA_OMP_TEAMS, + /* PRAGMA_OMP__LAST_ should be equal to the last PRAGMA_OMP_* code. */ + PRAGMA_OMP__LAST_ = PRAGMA_OMP_TEAMS, + + PRAGMA_GCC_PCH_PREPROCESS, + PRAGMA_IVDEP, + PRAGMA_UNROLL, + + PRAGMA_FIRST_EXTERNAL +}; + + +/* All clauses defined by OpenACC 2.0, and OpenMP 2.5, 3.0, 3.1, 4.0, 4.5, 5.0, + and 5.1. Used internally by both C and C++ parsers. 
*/ +enum pragma_omp_clause { + PRAGMA_OMP_CLAUSE_NONE = 0, + + PRAGMA_OMP_CLAUSE_AFFINITY, + PRAGMA_OMP_CLAUSE_ALIGNED, + PRAGMA_OMP_CLAUSE_ALLOCATE, + PRAGMA_OMP_CLAUSE_BIND, + PRAGMA_OMP_CLAUSE_COLLAPSE, + PRAGMA_OMP_CLAUSE_COPYIN, + PRAGMA_OMP_CLAUSE_COPYPRIVATE, + PRAGMA_OMP_CLAUSE_DEFAULT, + PRAGMA_OMP_CLAUSE_DEFAULTMAP, + PRAGMA_OMP_CLAUSE_DEPEND, + PRAGMA_OMP_CLAUSE_DETACH, + PRAGMA_OMP_CLAUSE_DEVICE, + PRAGMA_OMP_CLAUSE_DEVICE_TYPE, + PRAGMA_OMP_CLAUSE_DIST_SCHEDULE, + PRAGMA_OMP_CLAUSE_FILTER, + PRAGMA_OMP_CLAUSE_FINAL, + PRAGMA_OMP_CLAUSE_FIRSTPRIVATE, + PRAGMA_OMP_CLAUSE_FOR, + PRAGMA_OMP_CLAUSE_FROM, + PRAGMA_OMP_CLAUSE_GRAINSIZE, + PRAGMA_OMP_CLAUSE_HAS_DEVICE_ADDR, + PRAGMA_OMP_CLAUSE_HINT, + PRAGMA_OMP_CLAUSE_IF, + PRAGMA_OMP_CLAUSE_IN_REDUCTION, + PRAGMA_OMP_CLAUSE_INBRANCH, + PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR, + PRAGMA_OMP_CLAUSE_LASTPRIVATE, + PRAGMA_OMP_CLAUSE_LINEAR, + PRAGMA_OMP_CLAUSE_LINK, + PRAGMA_OMP_CLAUSE_MAP, + PRAGMA_OMP_CLAUSE_MERGEABLE, + PRAGMA_OMP_CLAUSE_NOGROUP, + PRAGMA_OMP_CLAUSE_NONTEMPORAL, + PRAGMA_OMP_CLAUSE_NOTINBRANCH, + PRAGMA_OMP_CLAUSE_NOWAIT, + PRAGMA_OMP_CLAUSE_NUM_TASKS, + PRAGMA_OMP_CLAUSE_NUM_TEAMS, + PRAGMA_OMP_CLAUSE_NUM_THREADS, + PRAGMA_OMP_CLAUSE_ORDER, + PRAGMA_OMP_CLAUSE_ORDERED, + PRAGMA_OMP_CLAUSE_PARALLEL, + PRAGMA_OMP_CLAUSE_PRIORITY, + PRAGMA_OMP_CLAUSE_PRIVATE, + PRAGMA_OMP_CLAUSE_PROC_BIND, + PRAGMA_OMP_CLAUSE_REDUCTION, + PRAGMA_OMP_CLAUSE_SAFELEN, + PRAGMA_OMP_CLAUSE_SCHEDULE, + PRAGMA_OMP_CLAUSE_SECTIONS, + PRAGMA_OMP_CLAUSE_SHARED, + PRAGMA_OMP_CLAUSE_SIMD, + PRAGMA_OMP_CLAUSE_SIMDLEN, + PRAGMA_OMP_CLAUSE_TASK_REDUCTION, + PRAGMA_OMP_CLAUSE_TASKGROUP, + PRAGMA_OMP_CLAUSE_THREAD_LIMIT, + PRAGMA_OMP_CLAUSE_THREADS, + PRAGMA_OMP_CLAUSE_TO, + PRAGMA_OMP_CLAUSE_UNIFORM, + PRAGMA_OMP_CLAUSE_UNTIED, + PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR, + PRAGMA_OMP_CLAUSE_USE_DEVICE_ADDR, + + /* Clauses for OpenACC. 
*/ + PRAGMA_OACC_CLAUSE_ASYNC, + PRAGMA_OACC_CLAUSE_ATTACH, + PRAGMA_OACC_CLAUSE_AUTO, + PRAGMA_OACC_CLAUSE_COPY, + PRAGMA_OACC_CLAUSE_COPYOUT, + PRAGMA_OACC_CLAUSE_CREATE, + PRAGMA_OACC_CLAUSE_DELETE, + PRAGMA_OACC_CLAUSE_DEVICEPTR, + PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT, + PRAGMA_OACC_CLAUSE_FINALIZE, + PRAGMA_OACC_CLAUSE_GANG, + PRAGMA_OACC_CLAUSE_HOST, + PRAGMA_OACC_CLAUSE_INDEPENDENT, + PRAGMA_OACC_CLAUSE_NO_CREATE, + PRAGMA_OACC_CLAUSE_NOHOST, + PRAGMA_OACC_CLAUSE_NUM_GANGS, + PRAGMA_OACC_CLAUSE_NUM_WORKERS, + PRAGMA_OACC_CLAUSE_PRESENT, + PRAGMA_OACC_CLAUSE_SELF, + PRAGMA_OACC_CLAUSE_SEQ, + PRAGMA_OACC_CLAUSE_TILE, + PRAGMA_OACC_CLAUSE_VECTOR, + PRAGMA_OACC_CLAUSE_VECTOR_LENGTH, + PRAGMA_OACC_CLAUSE_WAIT, + PRAGMA_OACC_CLAUSE_WORKER, + PRAGMA_OACC_CLAUSE_IF_PRESENT, + PRAGMA_OACC_CLAUSE_COLLAPSE = PRAGMA_OMP_CLAUSE_COLLAPSE, + PRAGMA_OACC_CLAUSE_COPYIN = PRAGMA_OMP_CLAUSE_COPYIN, + PRAGMA_OACC_CLAUSE_DEVICE = PRAGMA_OMP_CLAUSE_DEVICE, + PRAGMA_OACC_CLAUSE_DEFAULT = PRAGMA_OMP_CLAUSE_DEFAULT, + PRAGMA_OACC_CLAUSE_DETACH = PRAGMA_OMP_CLAUSE_DETACH, + PRAGMA_OACC_CLAUSE_FIRSTPRIVATE = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE, + PRAGMA_OACC_CLAUSE_IF = PRAGMA_OMP_CLAUSE_IF, + PRAGMA_OACC_CLAUSE_PRIVATE = PRAGMA_OMP_CLAUSE_PRIVATE, + PRAGMA_OACC_CLAUSE_REDUCTION = PRAGMA_OMP_CLAUSE_REDUCTION, + PRAGMA_OACC_CLAUSE_LINK = PRAGMA_OMP_CLAUSE_LINK, + PRAGMA_OACC_CLAUSE_USE_DEVICE = PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR +}; + +extern struct cpp_reader* parse_in; + +/* It's safe to always leave visibility pragma enabled as if + visibility is not supported on the host OS platform the + statements are ignored. */ +extern void push_visibility (const char *, int); +extern bool pop_visibility (int); + +extern void init_pragma (void); + +/* Front-end wrappers for pragma registration. */ +typedef void (*pragma_handler_1arg)(struct cpp_reader *); +/* A second pragma handler, which adds a void * argument allowing to pass extra + data to the handler. 
*/ +typedef void (*pragma_handler_2arg)(struct cpp_reader *, void *); + +/* This union allows to abstract the different handlers. */ +union gen_pragma_handler { + pragma_handler_1arg handler_1arg; + pragma_handler_2arg handler_2arg; +}; +/* Internally used to keep the data of the handler. */ +struct internal_pragma_handler { + union gen_pragma_handler handler; + /* Permits to know if handler is a pragma_handler_1arg (extra_data is false) + or a pragma_handler_2arg (extra_data is true). */ + bool extra_data; + /* A data field which can be used when extra_data is true. */ + void * data; +}; + +extern void c_register_pragma (const char *space, const char *name, + pragma_handler_1arg handler); +extern void c_register_pragma_with_data (const char *space, const char *name, + pragma_handler_2arg handler, + void *data); + +extern void c_register_pragma_with_expansion (const char *space, + const char *name, + pragma_handler_1arg handler); +extern void c_register_pragma_with_expansion_and_data (const char *space, + const char *name, + pragma_handler_2arg handler, + void *data); +extern void c_invoke_pragma_handler (unsigned int); + +extern void maybe_apply_pragma_weak (tree); +extern void maybe_apply_pending_pragma_weaks (void); +extern tree maybe_apply_renaming_pragma (tree, tree); +extern void maybe_apply_pragma_scalar_storage_order (tree); +extern void add_to_renaming_pragma_list (tree, tree); + +extern enum cpp_ttype pragma_lex (tree *, location_t *loc = NULL); + +/* Flags for use with c_lex_with_flags. The values here were picked + so that 0 means to translate and join strings. */ +#define C_LEX_STRING_NO_TRANSLATE 1 /* Do not lex strings into + execution character set. */ +#define C_LEX_STRING_NO_JOIN 2 /* Do not concatenate strings + nor translate them into execution + character set. */ + +/* This is not actually available to pragma parsers. It's merely a + convenient location to declare this function for c-lex, after + having enum cpp_ttype declared. 
*/ +extern enum cpp_ttype c_lex_with_flags (tree *, location_t *, unsigned char *, + int); + +extern void c_pp_lookup_pragma (unsigned int, const char **, const char **); + +extern GTY(()) tree pragma_extern_prefix; + +#endif /* GCC_C_PRAGMA_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-pretty-print.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-pretty-print.h new file mode 100644 index 0000000..ba7624d --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-family/c-pretty-print.h @@ -0,0 +1,143 @@ +/* Various declarations for the C and C++ pretty-printers. + Copyright (C) 2002-2022 Free Software Foundation, Inc. + Contributed by Gabriel Dos Reis + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_C_PRETTY_PRINTER +#define GCC_C_PRETTY_PRINTER + +#include "tree.h" +#include "c-family/c-common.h" +#include "pretty-print.h" + + +enum pp_c_pretty_print_flags + { + pp_c_flag_abstract = 1 << 1, + pp_c_flag_gnu_v3 = 1 << 2, + pp_c_flag_last_bit = 3 + }; + + +/* The data type used to bundle information necessary for pretty-printing + a C or C++ entity. */ +class c_pretty_printer; + +/* The type of a C pretty-printer 'member' function. */ +typedef void (*c_pretty_print_fn) (c_pretty_printer *, tree); + +/* The datatype that contains information necessary for pretty-printing + a tree that represents a C construct. 
Any pretty-printer for a + language using C syntax can derive from this datatype and reuse + facilities provided here. A derived pretty-printer can override + any function listed in the vtable below. See cp/cxx-pretty-print.h + and cp/cxx-pretty-print.cc for an example of derivation. */ +class c_pretty_printer : public pretty_printer +{ +public: + c_pretty_printer (); + pretty_printer *clone () const OVERRIDE; + + // Format string, possibly translated. + void translate_string (const char *); + + virtual void constant (tree); + virtual void id_expression (tree); + virtual void primary_expression (tree); + virtual void postfix_expression (tree); + virtual void unary_expression (tree); + virtual void multiplicative_expression (tree); + virtual void conditional_expression (tree); + virtual void assignment_expression (tree); + virtual void expression (tree); + + virtual void type_id (tree); + virtual void statement (tree); + + virtual void declaration (tree); + virtual void declaration_specifiers (tree); + virtual void simple_type_specifier (tree); + virtual void function_specifier (tree); + virtual void storage_class_specifier (tree); + virtual void declarator (tree); + virtual void direct_declarator (tree); + virtual void abstract_declarator (tree); + virtual void direct_abstract_declarator (tree); + + virtual void initializer (tree); + /* Points to the first element of an array of offset-list. + Not used yet. */ + int *offset_list; + + pp_flags flags; + + /* These must be overridden by each of the C and C++ front-end to + reflect their understanding of syntactic productions when they differ. 
*/ + c_pretty_print_fn type_specifier_seq; + c_pretty_print_fn ptr_operator; + c_pretty_print_fn parameter_list; +}; + +#define pp_c_tree_identifier(PPI, ID) \ + pp_c_identifier (PPI, IDENTIFIER_POINTER (ID)) + +#define pp_type_specifier_seq(PP, D) (PP)->type_specifier_seq (PP, D) +#define pp_ptr_operator(PP, D) (PP)->ptr_operator (PP, D) +#define pp_parameter_list(PP, T) (PP)->parameter_list (PP, T) + +void pp_c_whitespace (c_pretty_printer *); +void pp_c_left_paren (c_pretty_printer *); +void pp_c_right_paren (c_pretty_printer *); +void pp_c_left_brace (c_pretty_printer *); +void pp_c_right_brace (c_pretty_printer *); +void pp_c_left_bracket (c_pretty_printer *); +void pp_c_right_bracket (c_pretty_printer *); +void pp_c_dot (c_pretty_printer *); +void pp_c_ampersand (c_pretty_printer *); +void pp_c_star (c_pretty_printer *); +void pp_c_arrow (c_pretty_printer *); +void pp_c_semicolon (c_pretty_printer *); +void pp_c_complement (c_pretty_printer *); +void pp_c_exclamation (c_pretty_printer *); +void pp_c_space_for_pointer_operator (c_pretty_printer *, tree); + +/* Declarations. */ +void pp_c_tree_decl_identifier (c_pretty_printer *, tree); +void pp_c_function_definition (c_pretty_printer *, tree); +void pp_c_attributes (c_pretty_printer *, tree); +void pp_c_attributes_display (c_pretty_printer *, tree); +void pp_c_cv_qualifiers (c_pretty_printer *pp, int qualifiers, bool func_type); +void pp_c_type_qualifier_list (c_pretty_printer *, tree); +void pp_c_parameter_type_list (c_pretty_printer *, tree); +void pp_c_specifier_qualifier_list (c_pretty_printer *, tree); +/* Expressions. 
*/ +void pp_c_logical_or_expression (c_pretty_printer *, tree); +void pp_c_expression_list (c_pretty_printer *, tree); +void pp_c_constructor_elts (c_pretty_printer *, vec *); +void pp_c_call_argument_list (c_pretty_printer *, tree); +void pp_c_type_cast (c_pretty_printer *, tree); +void pp_c_cast_expression (c_pretty_printer *, tree); +void pp_c_init_declarator (c_pretty_printer *, tree); +void pp_c_ws_string (c_pretty_printer *, const char *); +void pp_c_identifier (c_pretty_printer *, const char *); +void pp_c_string_literal (c_pretty_printer *, tree); +void pp_c_integer_constant (c_pretty_printer *, tree); + +void print_c_tree (FILE *file, tree t); + +#endif /* GCC_C_PRETTY_PRINTER */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-tree.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-tree.h new file mode 100644 index 0000000..c70f0ba --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/c-tree.h @@ -0,0 +1,845 @@ +/* Definitions for C parsing and type checking. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_C_TREE_H +#define GCC_C_TREE_H + +#include "c-family/c-common.h" +#include "diagnostic.h" + +/* struct lang_identifier is private to c-decl.cc, but langhooks.cc needs to + know how big it is. This is sanity-checked in c-decl.cc. 
*/ +#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ + (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) + +/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) + +/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ +#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) + +/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE + nonzero if the definition of the type has already started. */ +#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) + +/* In an incomplete RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE, a list of + variable declarations whose type would be completed by completing + that type. */ +#define C_TYPE_INCOMPLETE_VARS(TYPE) \ + TYPE_LANG_SLOT_1 (TREE_CHECK4 (TYPE, RECORD_TYPE, UNION_TYPE, \ + QUAL_UNION_TYPE, ENUMERAL_TYPE)) + +/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a + keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */ +#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) + +/* Record whether a type or decl was written with nonconstant size. + Note that TYPE_SIZE may have simplified to a constant. */ +#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) +#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) + +/* Record whether a type is defined inside a struct or union type. + This is used for -Wc++-compat. */ +#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE) + +/* Record whether a typedef for type `int' was actually `signed int'. */ +#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) + +/* For a FUNCTION_DECL, nonzero if it was defined without an explicit + return type. */ +#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) + +/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ +#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) + +/* For a PARM_DECL, nonzero if it was declared as an array. 
*/ +#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE) + +/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has + been declared. */ +#define C_DECL_DECLARED_BUILTIN(EXP) \ + DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) + +/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a + built-in prototype and does not have a non-built-in prototype. */ +#define C_DECL_BUILTIN_PROTOTYPE(EXP) \ + DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) + +/* Record whether a decl was declared register. This is strictly a + front-end flag, whereas DECL_REGISTER is used for code generation; + they may differ for structures with volatile fields. */ +#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) + +/* Record whether a decl was used in an expression anywhere except an + unevaluated operand of sizeof / typeof / alignof. This is only + used for functions declared static but not defined, though outside + sizeof and typeof it is set for other function decls as well. */ +#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) + +/* Record whether a variable has been declared threadprivate by + #pragma omp threadprivate. */ +#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) + +/* Set on VAR_DECLs for compound literals. */ +#define C_DECL_COMPOUND_LITERAL_P(DECL) \ + DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL)) + +/* Nonzero for a decl which either doesn't exist or isn't a prototype. + N.B. Could be simplified if all built-in decls had complete prototypes + (but this is presently difficult because some of them need FILE*). */ +#define C_DECL_ISNT_PROTOTYPE(EXP) \ + (EXP == 0 \ + || (!prototype_p (TREE_TYPE (EXP)) \ + && !fndecl_built_in_p (EXP))) + +/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as + TYPE_ARG_TYPES for functions with prototypes, but created for functions + without prototypes. 
*/ +#define TYPE_ACTUAL_ARG_TYPES(NODE) \ + TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE)) + +/* For a CONSTRUCTOR, whether some initializer contains a + subexpression meaning it is not a constant expression. */ +#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR)) + +/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already + been folded. */ +#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP)) + +/* Record parser information about an expression that is irrelevant + for code generation alongside a tree representing its value. */ +struct c_expr +{ + /* The value of the expression. */ + tree value; + /* Record the original unary/binary operator of an expression, which may + have been changed by fold, STRING_CST for unparenthesized string + constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls + (even if parenthesized), for subexpressions, and for non-constant + initializers, or ERROR_MARK for other expressions (including + parenthesized expressions). */ + enum tree_code original_code; + /* If not NULL, the original type of an expression. This will + differ from the type of the value field for an enum constant. + The type of an enum constant is a plain integer type, but this + field will be the enum type. */ + tree original_type; + + /* The source range of this expression. This is redundant + for node values that have locations, but not all node kinds + have locations (e.g. constants, and references to params, locals, + etc), so we stash a copy here. */ + source_range src_range; + + /* Access to the first and last locations within the source spelling + of this expression. 
*/ + location_t get_start () const { return src_range.m_start; } + location_t get_finish () const { return src_range.m_finish; } + + location_t get_location () const + { + if (EXPR_HAS_LOCATION (value)) + return EXPR_LOCATION (value); + else + return make_location (get_start (), get_start (), get_finish ()); + } + + /* Set the value to error_mark_node whilst ensuring that src_range + is initialized. */ + void set_error () + { + value = error_mark_node; + src_range.m_start = UNKNOWN_LOCATION; + src_range.m_finish = UNKNOWN_LOCATION; + } +}; + +/* Type alias for struct c_expr. This allows to use the structure + inside the VEC types. */ +typedef struct c_expr c_expr_t; + +/* A kind of type specifier. Note that this information is currently + only used to distinguish tag definitions, tag references and typeof + uses. */ +enum c_typespec_kind { + /* No typespec. This appears only in struct c_declspec. */ + ctsk_none, + /* A reserved keyword type specifier. */ + ctsk_resword, + /* A reference to a tag, previously declared, such as "struct foo". + This includes where the previous declaration was as a different + kind of tag, in which case this is only valid if shadowing that + tag in an inner scope. */ + ctsk_tagref, + /* Likewise, with standard attributes present in the reference. */ + ctsk_tagref_attrs, + /* A reference to a tag, not previously declared in a visible + scope. */ + ctsk_tagfirstref, + /* Likewise, with standard attributes present in the reference. */ + ctsk_tagfirstref_attrs, + /* A definition of a tag such as "struct foo { int a; }". */ + ctsk_tagdef, + /* A typedef name. */ + ctsk_typedef, + /* An ObjC-specific kind of type specifier. */ + ctsk_objc, + /* A typeof specifier, or _Atomic ( type-name ). */ + ctsk_typeof +}; + +/* A type specifier: this structure is created in the parser and + passed to declspecs_add_type only. */ +struct c_typespec { + /* What kind of type specifier this is. 
*/ + enum c_typespec_kind kind; + /* Whether the expression has operands suitable for use in constant + expressions. */ + bool expr_const_operands; + /* The specifier itself. */ + tree spec; + /* An expression to be evaluated before the type specifier, in the + case of typeof specifiers, or NULL otherwise or if no such + expression is required for a particular typeof specifier. In + particular, when typeof is applied to an expression of variably + modified type, that expression must be evaluated in order to + determine array sizes that form part of the type, but the + expression itself (as opposed to the array sizes) forms no part + of the type and so needs to be recorded separately. */ + tree expr; +}; + +/* A storage class specifier. */ +enum c_storage_class { + csc_none, + csc_auto, + csc_extern, + csc_register, + csc_static, + csc_typedef +}; + +/* A type specifier keyword "void", "_Bool", "char", "int", "float", + "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", + or none of these. */ +enum c_typespec_keyword { + cts_none, + cts_void, + cts_bool, + cts_char, + cts_int, + cts_float, + cts_int_n, + cts_double, + cts_dfloat32, + cts_dfloat64, + cts_dfloat128, + cts_floatn_nx, + cts_fract, + cts_accum, + cts_auto_type +}; + +/* This enum lists all the possible declarator specifiers, storage + class or attribute that a user can write. There is at least one + enumerator per possible declarator specifier in the struct + c_declspecs below. + + It is used to index the array of declspec locations in struct + c_declspecs. */ +enum c_declspec_word { + cdw_typespec /* A catch-all for a typespec. 
*/, + cdw_storage_class /* A catch-all for a storage class */, + cdw_attributes, + cdw_typedef, + cdw_explicit_signed, + cdw_deprecated, + cdw_default_int, + cdw_long, + cdw_long_long, + cdw_short, + cdw_signed, + cdw_unsigned, + cdw_complex, + cdw_inline, + cdw_noreturn, + cdw_thread, + cdw_const, + cdw_volatile, + cdw_restrict, + cdw_atomic, + cdw_saturating, + cdw_alignas, + cdw_address_space, + cdw_gimple, + cdw_rtl, + cdw_number_of_elements /* This one must always be the last + enumerator. */ +}; + +enum c_declspec_il { + cdil_none, + cdil_gimple, /* __GIMPLE */ + cdil_gimple_cfg, /* __GIMPLE(cfg) */ + cdil_gimple_ssa, /* __GIMPLE(ssa) */ + cdil_rtl /* __RTL */ +}; + +/* A sequence of declaration specifiers in C. When a new declaration + specifier is added, please update the enum c_declspec_word above + accordingly. */ +struct c_declspecs { + location_t locations[cdw_number_of_elements]; + /* The type specified, if a single type specifier such as a struct, + union or enum specifier, typedef name or typeof specifies the + whole type, or NULL_TREE if none or a keyword such as "void" or + "char" is used. Does not include qualifiers. */ + tree type; + /* Any expression to be evaluated before the type, from a typeof + specifier. */ + tree expr; + /* The attributes from a typedef decl. */ + tree decl_attr; + /* When parsing, the GNU attributes and prefix standard attributes. + Outside the parser, this will be NULL; attributes (possibly from + multiple lists) will be passed separately. */ + tree attrs; + /* When parsing, postfix standard attributes (which appertain to the + type specified by the preceding declaration specifiers, unlike + prefix standard attributes which appertain to the declaration or + declarations as a whole). */ + tree postfix_attrs; + /* The pass to start compiling a __GIMPLE or __RTL function with. */ + char *gimple_or_rtl_pass; + /* ENTRY BB count. 
*/ + profile_count entry_bb_count; + /* The base-2 log of the greatest alignment required by an _Alignas + specifier, in bytes, or -1 if no such specifiers with nonzero + alignment. */ + int align_log; + /* For the __intN declspec, this stores the index into the int_n_* arrays. */ + int int_n_idx; + /* For the _FloatN and _FloatNx declspec, this stores the index into + the floatn_nx_types array. */ + int floatn_nx_idx; + /* The storage class specifier, or csc_none if none. */ + enum c_storage_class storage_class; + /* Any type specifier keyword used such as "int", not reflecting + modifiers such as "short", or cts_none if none. */ + ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8; + /* The kind of type specifier if one has been seen, ctsk_none + otherwise. */ + ENUM_BITFIELD (c_typespec_kind) typespec_kind : 4; + ENUM_BITFIELD (c_declspec_il) declspec_il : 3; + /* Whether any expressions in typeof specifiers may appear in + constant expressions. */ + BOOL_BITFIELD expr_const_operands : 1; + /* Whether any declaration specifiers have been seen at all. */ + BOOL_BITFIELD declspecs_seen_p : 1; + /* Whether any declaration specifiers other than standard attributes + have been seen at all. If only standard attributes have been + seen, this is an attribute-declaration. */ + BOOL_BITFIELD non_std_attrs_seen_p : 1; + /* Whether something other than a storage class specifier or + attribute has been seen. This is used to warn for the + obsolescent usage of storage class specifiers other than at the + start of the list. (Doing this properly would require function + specifiers to be handled separately from storage class + specifiers.) */ + BOOL_BITFIELD non_sc_seen_p : 1; + /* Whether the type is specified by a typedef or typeof name. */ + BOOL_BITFIELD typedef_p : 1; + /* Whether the type is explicitly "signed" or specified by a typedef + whose type is explicitly "signed". */ + BOOL_BITFIELD explicit_signed_p : 1; + /* Whether the specifiers include a deprecated typedef. 
*/ + BOOL_BITFIELD deprecated_p : 1; + /* Whether the specifiers include an unavailable typedef. */ + BOOL_BITFIELD unavailable_p : 1; + /* Whether the type defaulted to "int" because there were no type + specifiers. */ + BOOL_BITFIELD default_int_p : 1; + /* Whether "long" was specified. */ + BOOL_BITFIELD long_p : 1; + /* Whether "long" was specified more than once. */ + BOOL_BITFIELD long_long_p : 1; + /* Whether "short" was specified. */ + BOOL_BITFIELD short_p : 1; + /* Whether "signed" was specified. */ + BOOL_BITFIELD signed_p : 1; + /* Whether "unsigned" was specified. */ + BOOL_BITFIELD unsigned_p : 1; + /* Whether "complex" was specified. */ + BOOL_BITFIELD complex_p : 1; + /* Whether "inline" was specified. */ + BOOL_BITFIELD inline_p : 1; + /* Whether "_Noreturn" was speciied. */ + BOOL_BITFIELD noreturn_p : 1; + /* Whether "__thread" or "_Thread_local" was specified. */ + BOOL_BITFIELD thread_p : 1; + /* Whether "__thread" rather than "_Thread_local" was specified. */ + BOOL_BITFIELD thread_gnu_p : 1; + /* Whether "const" was specified. */ + BOOL_BITFIELD const_p : 1; + /* Whether "volatile" was specified. */ + BOOL_BITFIELD volatile_p : 1; + /* Whether "restrict" was specified. */ + BOOL_BITFIELD restrict_p : 1; + /* Whether "_Atomic" was specified. */ + BOOL_BITFIELD atomic_p : 1; + /* Whether "_Sat" was specified. */ + BOOL_BITFIELD saturating_p : 1; + /* Whether any alignment specifier (even with zero alignment) was + specified. */ + BOOL_BITFIELD alignas_p : 1; + /* The address space that the declaration belongs to. */ + addr_space_t address_space; +}; + +/* The various kinds of declarators in C. */ +enum c_declarator_kind { + /* An identifier. */ + cdk_id, + /* A function. */ + cdk_function, + /* An array. */ + cdk_array, + /* A pointer. */ + cdk_pointer, + /* Parenthesized declarator with nested attributes. */ + cdk_attrs +}; + +struct c_arg_tag { + /* The argument name. */ + tree id; + /* The type of the argument. 
*/ + tree type; +}; + + +/* Information about the parameters in a function declarator. */ +struct c_arg_info { + /* A list of parameter decls. */ + tree parms; + /* A list of structure, union and enum tags defined. */ + vec *tags; + /* A list of argument types to go in the FUNCTION_TYPE. */ + tree types; + /* A list of non-parameter decls (notably enumeration constants) + defined with the parameters. */ + tree others; + /* A compound expression of VLA sizes from the parameters, or NULL. + In a function definition, these are used to ensure that + side-effects in sizes of arrays converted to pointers (such as a + parameter int i[n++]) take place; otherwise, they are + ignored. */ + tree pending_sizes; + /* True when these arguments had [*]. */ + BOOL_BITFIELD had_vla_unspec : 1; +}; + +/* A declarator. */ +struct c_declarator { + /* The kind of declarator. */ + enum c_declarator_kind kind; + location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ + /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ + struct c_declarator *declarator; + union { + /* For identifiers. */ + struct { + /* An IDENTIFIER_NODE, or NULL_TREE if an abstract + declarator. */ + tree id; + /* Any attributes (which apply to the declaration rather than to + the type described by the outer declarators). */ + tree attrs; + } id; + /* For functions. */ + struct c_arg_info *arg_info; + /* For arrays. */ + struct { + /* The array dimension, or NULL for [] and [*]. */ + tree dimen; + /* The qualifiers inside []. */ + int quals; + /* The attributes (currently ignored) inside []. */ + tree attrs; + /* Whether [static] was used. */ + BOOL_BITFIELD static_p : 1; + /* Whether [*] was used. */ + BOOL_BITFIELD vla_unspec_p : 1; + } array; + /* For pointers, the qualifiers on the pointer type. */ + int pointer_quals; + /* For attributes. */ + tree attrs; + } u; +}; + +/* A type name. */ +struct c_type_name { + /* The declaration specifiers. 
*/ + struct c_declspecs *specs; + /* The declarator. */ + struct c_declarator *declarator; +}; + +/* A parameter. */ +struct c_parm { + /* The declaration specifiers, minus any prefix attributes. */ + struct c_declspecs *specs; + /* The attributes. */ + tree attrs; + /* The declarator. */ + struct c_declarator *declarator; + /* The location of the parameter. */ + location_t loc; +}; + +/* Used when parsing an enum. Initialized by start_enum. */ +struct c_enum_contents +{ + /* While defining an enum type, this is 1 plus the last enumerator + constant value. */ + tree enum_next_value; + + /* Nonzero means that there was overflow computing enum_next_value. */ + int enum_overflow; +}; + +/* A type of reference to a static identifier in an inline + function. */ +enum c_inline_static_type { + /* Identifier with internal linkage used in function that may be an + inline definition (i.e., file-scope static). */ + csi_internal, + /* Modifiable object with static storage duration defined in + function that may be an inline definition (i.e., local + static). */ + csi_modifiable +}; + + +/* in c-parser.cc */ +extern void c_parse_init (void); +extern bool c_keyword_starts_typename (enum rid keyword); + +/* in c-aux-info.cc */ +extern void gen_aux_info_record (tree, int, int, int); + +/* in c-decl.cc */ +struct c_spot_bindings; +class c_struct_parse_info; +extern struct obstack parser_obstack; +/* Set to IN_ITERATION_STMT if parsing an iteration-statement, + to IN_OMP_BLOCK if parsing OpenMP structured block and + IN_OMP_FOR if parsing OpenMP loop. If parsing a switch statement, + this is bitwise ORed with IN_SWITCH_STMT, unless parsing an + iteration-statement, OpenMP block or loop within that switch. 
*/ +#define IN_SWITCH_STMT 1 +#define IN_ITERATION_STMT 2 +#define IN_OMP_BLOCK 4 +#define IN_OMP_FOR 8 +#define IN_OBJC_FOREACH 16 +extern unsigned char in_statement; + +extern bool switch_statement_break_seen_p; + +extern bool global_bindings_p (void); +extern tree pushdecl (tree); +extern void push_scope (void); +extern tree pop_scope (void); +extern void c_bindings_start_stmt_expr (struct c_spot_bindings *); +extern void c_bindings_end_stmt_expr (struct c_spot_bindings *); + +extern void record_inline_static (location_t, tree, tree, + enum c_inline_static_type); +extern void c_init_decl_processing (void); +extern void c_print_identifier (FILE *, tree, int); +extern int quals_from_declspecs (const struct c_declspecs *); +extern struct c_declarator *build_array_declarator (location_t, tree, + struct c_declspecs *, + bool, bool); +extern tree build_enumerator (location_t, location_t, struct c_enum_contents *, + tree, tree); +extern tree check_for_loop_decls (location_t, bool); +extern void mark_forward_parm_decls (void); +extern void declare_parm_level (void); +extern void undeclared_variable (location_t, tree); +extern tree lookup_label_for_goto (location_t, tree); +extern tree declare_label (tree); +extern tree define_label (location_t, tree); +extern struct c_spot_bindings *c_get_switch_bindings (void); +extern void c_release_switch_bindings (struct c_spot_bindings *); +extern bool c_check_switch_jump_warnings (struct c_spot_bindings *, + location_t, location_t); +extern void finish_decl (tree, location_t, tree, tree, tree); +extern tree finish_enum (tree, tree, tree); +extern void finish_function (location_t = input_location); +extern tree finish_struct (location_t, tree, tree, tree, + class c_struct_parse_info *); +extern tree c_simulate_enum_decl (location_t, const char *, + vec *); +extern tree c_simulate_record_decl (location_t, const char *, + array_slice); +extern struct c_arg_info *build_arg_info (void); +extern struct c_arg_info *get_parm_info (bool, 
tree); +extern tree grokfield (location_t, struct c_declarator *, + struct c_declspecs *, tree, tree *); +extern tree groktypename (struct c_type_name *, tree *, bool *); +extern tree grokparm (const struct c_parm *, tree *); +extern tree implicitly_declare (location_t, tree); +extern void keep_next_level (void); +extern void pending_xref_error (void); +extern void c_push_function_context (void); +extern void c_pop_function_context (void); +extern void push_parm_decl (const struct c_parm *, tree *); +extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, + struct c_declarator *); +extern tree c_builtin_function (tree); +extern tree c_builtin_function_ext_scope (tree); +extern tree c_simulate_builtin_function_decl (tree); +extern void c_warn_unused_attributes (tree); +extern tree c_warn_type_attributes (tree); +extern void shadow_tag (const struct c_declspecs *); +extern void shadow_tag_warned (const struct c_declspecs *, int); +extern tree start_enum (location_t, struct c_enum_contents *, tree); +extern bool start_function (struct c_declspecs *, struct c_declarator *, tree); +extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, + tree, location_t * = NULL); +extern tree start_struct (location_t, enum tree_code, tree, + class c_struct_parse_info **); +extern void store_parm_decls (void); +extern void store_parm_decls_from (struct c_arg_info *); +extern void temp_store_parm_decls (tree, tree); +extern void temp_pop_parm_decls (void); +extern tree xref_tag (enum tree_code, tree); +extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree, + bool, tree); +extern struct c_parm *build_c_parm (struct c_declspecs *, tree, + struct c_declarator *, location_t); +extern struct c_declarator *build_attrs_declarator (tree, + struct c_declarator *); +extern struct c_declarator *build_function_declarator (struct c_arg_info *, + struct c_declarator *); +extern struct c_declarator *build_id_declarator (tree); +extern 
struct c_declarator *make_pointer_declarator (struct c_declspecs *, + struct c_declarator *); +extern struct c_declspecs *build_null_declspecs (void); +extern struct c_declspecs *declspecs_add_qual (location_t, + struct c_declspecs *, tree); +extern struct c_declspecs *declspecs_add_type (location_t, + struct c_declspecs *, + struct c_typespec); +extern struct c_declspecs *declspecs_add_scspec (location_t, + struct c_declspecs *, tree); +extern struct c_declspecs *declspecs_add_attrs (location_t, + struct c_declspecs *, tree); +extern struct c_declspecs *declspecs_add_addrspace (location_t, + struct c_declspecs *, + addr_space_t); +extern struct c_declspecs *declspecs_add_alignas (location_t, + struct c_declspecs *, tree); +extern struct c_declspecs *finish_declspecs (struct c_declspecs *); + +/* in c-objc-common.cc */ +extern bool c_objc_common_init (void); +extern bool c_missing_noreturn_ok_p (tree); +extern bool c_warn_unused_global_decl (const_tree); +extern void c_initialize_diagnostics (diagnostic_context *); +extern bool c_vla_unspec_p (tree x, tree fn); +extern alias_set_type c_get_alias_set (tree); + +/* in c-typeck.cc */ +extern int in_alignof; +extern int in_sizeof; +extern int in_typeof; +extern bool c_in_omp_for; + +extern tree c_last_sizeof_arg; +extern location_t c_last_sizeof_loc; + +extern struct c_switch *c_switch_stack; + +extern bool char_type_p (tree); +extern tree c_objc_common_truthvalue_conversion (location_t, tree); +extern tree require_complete_type (location_t, tree); +extern bool same_translation_unit_p (const_tree, const_tree); +extern int comptypes (tree, tree); +extern int comptypes_check_different_types (tree, tree, bool *); +extern bool c_vla_type_p (const_tree); +extern bool c_mark_addressable (tree, bool = false); +extern void c_incomplete_type_error (location_t, const_tree, const_tree); +extern tree c_type_promotes_to (tree); +extern struct c_expr default_function_array_conversion (location_t, + struct c_expr); +extern struct 
c_expr default_function_array_read_conversion (location_t, + struct c_expr); +extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr, + bool, bool); +extern tree decl_constant_value_1 (tree, bool); +extern void mark_exp_read (tree); +extern tree composite_type (tree, tree); +extern tree build_component_ref (location_t, tree, tree, location_t); +extern tree build_array_ref (location_t, tree, tree); +extern tree build_external_ref (location_t, tree, bool, tree *); +extern void pop_maybe_used (bool); +extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr); +extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *); +extern struct c_expr parser_build_unary_op (location_t, enum tree_code, + struct c_expr); +extern struct c_expr parser_build_binary_op (location_t, + enum tree_code, struct c_expr, + struct c_expr); +extern tree build_conditional_expr (location_t, tree, bool, tree, tree, + location_t, tree, tree, location_t); +extern tree build_compound_expr (location_t, tree, tree); +extern tree c_cast_expr (location_t, struct c_type_name *, tree); +extern tree build_c_cast (location_t, tree, tree); +extern void store_init_value (location_t, tree, tree, tree); +extern void maybe_warn_string_init (location_t, tree, struct c_expr); +extern void start_init (tree, tree, int, rich_location *); +extern void finish_init (void); +extern void really_start_incremental_init (tree); +extern void finish_implicit_inits (location_t, struct obstack *); +extern void push_init_level (location_t, int, struct obstack *); +extern struct c_expr pop_init_level (location_t, int, struct obstack *, + location_t); +extern void set_init_index (location_t, tree, tree, struct obstack *); +extern void set_init_label (location_t, tree, location_t, struct obstack *); +extern void process_init_element (location_t, struct c_expr, bool, + struct obstack *); +extern tree build_compound_literal (location_t, tree, tree, bool, + unsigned int); +extern void 
check_compound_literal_type (location_t, struct c_type_name *); +extern tree c_start_switch (location_t, location_t, tree, bool); +extern void c_finish_switch (tree, tree); +extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool, + bool); +extern tree build_asm_stmt (bool, tree); +extern int c_types_compatible_p (tree, tree); +extern tree c_begin_compound_stmt (bool); +extern tree c_end_compound_stmt (location_t, tree, bool); +extern void c_finish_if_stmt (location_t, tree, tree, tree); +extern void c_finish_loop (location_t, location_t, tree, location_t, tree, + tree, tree, tree, bool); +extern tree c_begin_stmt_expr (void); +extern tree c_finish_stmt_expr (location_t, tree); +extern tree c_process_expr_stmt (location_t, tree); +extern tree c_finish_expr_stmt (location_t, tree); +extern tree c_finish_return (location_t, tree, tree); +extern tree c_finish_bc_stmt (location_t, tree, bool); +extern tree c_finish_goto_label (location_t, tree); +extern tree c_finish_goto_ptr (location_t, c_expr val); +extern tree c_expr_to_decl (tree, bool *, bool *); +extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree); +extern tree c_finish_oacc_data (location_t, tree, tree); +extern tree c_finish_oacc_host_data (location_t, tree, tree); +extern tree c_begin_omp_parallel (void); +extern tree c_finish_omp_parallel (location_t, tree, tree); +extern tree c_begin_omp_task (void); +extern tree c_finish_omp_task (location_t, tree, tree); +extern void c_finish_omp_cancel (location_t, tree); +extern void c_finish_omp_cancellation_point (location_t, tree); +extern tree c_finish_omp_clauses (tree, enum c_omp_region_type); +extern tree c_build_va_arg (location_t, tree, location_t, tree); +extern tree c_finish_transaction (location_t, tree, int); +extern bool c_tree_equal (tree, tree); +extern tree c_build_function_call_vec (location_t, const vec&, + tree, vec *, + vec *); +extern tree c_omp_clause_copy_ctor (tree, tree, tree); + +/* Set to 0 at 
beginning of a function definition, set to 1 if + a return statement that specifies a return value is seen. */ + +extern int current_function_returns_value; + +/* Set to 0 at beginning of a function definition, set to 1 if + a return statement with no argument is seen. */ + +extern int current_function_returns_null; + +/* Set to 0 at beginning of a function definition, set to 1 if + a call to a noreturn function is seen. */ + +extern int current_function_returns_abnormally; + +/* In c-decl.cc */ + +/* Tell the binding oracle what kind of binding we are looking for. */ + +enum c_oracle_request +{ + C_ORACLE_SYMBOL, + C_ORACLE_TAG, + C_ORACLE_LABEL +}; + +/* If this is non-NULL, then it is a "binding oracle" which can lazily + create bindings when needed by the C compiler. The oracle is told + the name and type of the binding to create. It can call pushdecl + or the like to ensure the binding is visible; or do nothing, + leaving the binding untouched. c-decl.cc takes note of when the + oracle has been called and will not call it again if it fails to + create a given binding. */ + +typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier); + +extern c_binding_oracle_function *c_binding_oracle; + +extern void c_finish_incomplete_decl (tree); +extern tree c_omp_reduction_id (enum tree_code, tree); +extern tree c_omp_reduction_decl (tree); +extern tree c_omp_reduction_lookup (tree, tree); +extern tree c_check_omp_declare_reduction_r (tree *, int *, void *); +extern bool c_check_in_current_scope (tree); +extern void c_pushtag (location_t, tree, tree); +extern void c_bind (location_t, tree, bool); +extern bool tag_exists_p (enum tree_code, tree); + +/* In c-errors.cc */ +extern bool pedwarn_c90 (location_t, int opt, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool pedwarn_c99 (location_t, int opt, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool pedwarn_c11 (location_t, int opt, const char *, ...) 
+ ATTRIBUTE_GCC_DIAG(3,4); + +extern void +set_c_expr_source_range (c_expr *expr, + location_t start, location_t finish); + +extern void +set_c_expr_source_range (c_expr *expr, + source_range src_range); + +/* In c-fold.cc */ +extern vec incomplete_record_decls; + +#if CHECKING_P +namespace selftest { + extern void run_c_tests (void); +} // namespace selftest +#endif /* #if CHECKING_P */ + + +#endif /* ! GCC_C_TREE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/calls.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/calls.h new file mode 100644 index 0000000..fd7836e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/calls.h @@ -0,0 +1,138 @@ +/* Declarations and data types for RTL call insn generation. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CALLS_H +#define GCC_CALLS_H + +/* Describes a function argument. + + Each argument conceptually has a gimple-level type. Usually this type + is available directly as a tree via the TYPE field, but when calling + libgcc support functions it might instead be inferred from a mode, + in which case the type isn't available directly. + + This gimple-level type might go through promotion before being passed to + the target function. 
Depending on the context, the MODE field is either + the mode of the gimple-level type (whether explicitly given or not) + or the mode after promotion has been performed. */ +class function_arg_info +{ +public: + function_arg_info () + : type (NULL_TREE), mode (VOIDmode), named (false), + pass_by_reference (false) + {} + + /* Initialize an argument of mode MODE, either before or after promotion. */ + function_arg_info (machine_mode mode, bool named) + : type (NULL_TREE), mode (mode), named (named), pass_by_reference (false) + {} + + /* Initialize an unpromoted argument of type TYPE. */ + function_arg_info (tree type, bool named) + : type (type), mode (TYPE_MODE (type)), named (named), + pass_by_reference (false) + {} + + /* Initialize an argument with explicit properties. */ + function_arg_info (tree type, machine_mode mode, bool named) + : type (type), mode (mode), named (named), pass_by_reference (false) + {} + + /* Return true if the gimple-level type is an aggregate. */ + bool aggregate_type_p () const { return type && AGGREGATE_TYPE_P (type); } + + /* Return the size of the gimple-level type, or -1 if the size is + variable or otherwise not representable as a poly_int64. + + Use this function when MODE is the mode of the type before promotion, + or in any context if the target never promotes function arguments. */ + poly_int64 type_size_in_bytes () const + { + if (type) + return int_size_in_bytes (type); + return GET_MODE_SIZE (mode); + } + + /* Return the size of the argument after promotion, or -1 if the size + is variable or otherwise not representable as a poly_int64. + + Use this function when MODE is the mode of the type after promotion. */ + poly_int64 promoted_size_in_bytes () const + { + if (mode == BLKmode) + return int_size_in_bytes (type); + return GET_MODE_SIZE (mode); + } + + /* True if the argument represents the end of the argument list, + as returned by end_marker (). 
*/ + bool end_marker_p () const { return mode == VOIDmode; } + + /* Return a function_arg_info that represents the end of the + argument list. */ + static function_arg_info end_marker () + { + return function_arg_info (void_type_node, /*named=*/true); + } + + /* The type of the argument, or null if not known (which is true for + libgcc support functions). */ + tree type; + + /* The mode of the argument. Depending on context, this might be + the mode of the argument type or the mode after promotion. */ + machine_mode mode; + + /* True if the argument is treated as a named argument, false if it is + treated as an unnamed variadic argument (i.e. one passed through + "..."). See also TARGET_STRICT_ARGUMENT_NAMING. */ + unsigned int named : 1; + + /* True if we have decided to pass the argument by reference, in which case + the function_arg_info describes a pointer to the original argument. */ + unsigned int pass_by_reference : 1; +}; + +extern int flags_from_decl_or_type (const_tree); +extern int call_expr_flags (const_tree); +extern int setjmp_call_p (const_tree); +extern bool gimple_maybe_alloca_call_p (const gimple *); +extern bool gimple_alloca_call_p (const gimple *); +extern bool alloca_call_p (const_tree); +extern bool must_pass_in_stack_var_size (const function_arg_info &); +extern bool must_pass_in_stack_var_size_or_pad (const function_arg_info &); +extern bool must_pass_va_arg_in_stack (tree); +extern rtx prepare_call_address (tree, rtx, rtx, rtx *, int, int); +extern bool shift_return_value (machine_mode, bool, rtx); +extern rtx expand_call (tree, rtx, int); +extern void fixup_tail_calls (void); + +extern bool pass_by_reference (CUMULATIVE_ARGS *, function_arg_info); +extern bool pass_va_arg_by_reference (tree); +extern bool apply_pass_by_reference_rules (CUMULATIVE_ARGS *, + function_arg_info &); +extern bool reference_callee_copied (CUMULATIVE_ARGS *, + const function_arg_info &); +extern void maybe_complain_about_tail_call (tree, const char *); + +extern 
rtx rtx_for_static_chain (const_tree, bool); +extern bool cxx17_empty_base_field_p (const_tree); + +#endif // GCC_CALLS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ccmp.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ccmp.h new file mode 100644 index 0000000..1799d5f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ccmp.h @@ -0,0 +1,25 @@ +/* Conditional comapre related functions. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CCMP_H +#define GCC_CCMP_H + +extern rtx expand_ccmp_expr (gimple *, machine_mode); + +#endif /* GCC_CCMP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfg-flags.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfg-flags.def new file mode 100644 index 0000000..a2c0676 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfg-flags.def @@ -0,0 +1,191 @@ +/* Flags on basic blocks and edges. + Copyright (C) 2012-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* This file defines flags that may appear on basic blocks or on + edges. Source files define DEF_BASIC_BLOCK_FLAG or DEF_EDGE_FLAG + appropriately before including this file. */ + +#if !defined(DEF_BASIC_BLOCK_FLAG) && !defined(DEF_EDGE_FLAG) +#error "You must define DEF_BASIC_BLOCK_FLAG or DEF_EDGE_FLAG" +#endif + +#ifdef DEF_BASIC_BLOCK_FLAG + +/* Masks for basic_block.flags. + + The format of this file is: DEF_BASIC_BLOCK_FLAG(NAME, IDX). + NAME is the name of the basic block flag. A flag BB_#NAME will be + created and the name is used in dump_edge_info. + IDX is a sequence number that is used to determine the value + of the flag, which is 1 << IDX). + + BB_HOT_PARTITION and BB_COLD_PARTITION should be preserved throughout + the compilation, so they are never cleared. + + All other flags may be cleared by clear_bb_flags(). It is generally + a bad idea to rely on any flags being up-to-date. */ + +/* Only set on blocks that have just been created by create_bb. */ +DEF_BASIC_BLOCK_FLAG(NEW, 0) + +/* Set by find_unreachable_blocks. Do not rely on this being set in any + pass. */ +DEF_BASIC_BLOCK_FLAG(REACHABLE, 1) + +/* Set for blocks in an irreducible loop by loop analysis. */ +DEF_BASIC_BLOCK_FLAG(IRREDUCIBLE_LOOP, 2) + +/* Set on blocks that may actually not be single-entry single-exit block. */ +DEF_BASIC_BLOCK_FLAG(SUPERBLOCK, 3) + +/* Set on basic blocks that the scheduler should not touch. This is used + by SMS to prevent other schedulers from messing with the loop schedule. */ +DEF_BASIC_BLOCK_FLAG(DISABLE_SCHEDULE, 4) + +/* Set on blocks that should be put in a hot section. 
*/ +DEF_BASIC_BLOCK_FLAG(HOT_PARTITION, 5) + +/* Set on blocks that should be put in a cold section. */ +DEF_BASIC_BLOCK_FLAG(COLD_PARTITION, 6) + +/* Set on block that was duplicated. */ +DEF_BASIC_BLOCK_FLAG(DUPLICATED, 7) + +/* Set if the label at the top of this block is the target of a non-local goto. */ +DEF_BASIC_BLOCK_FLAG(NON_LOCAL_GOTO_TARGET, 8) + +/* Set on blocks that are in RTL format. */ +DEF_BASIC_BLOCK_FLAG(RTL, 9) + +/* Set on blocks that are forwarder blocks. + Only used in cfgcleanup.cc. */ +DEF_BASIC_BLOCK_FLAG(FORWARDER_BLOCK, 10) + +/* Set on blocks that cannot be threaded through. + Only used for jump threading. */ +DEF_BASIC_BLOCK_FLAG(NONTHREADABLE_BLOCK, 11) + +/* Set on blocks that were modified in some way. This bit is set in + df_set_bb_dirty, but not cleared by df_analyze, so it can be used + to test whether a block has been modified prior to a df_analyze call. */ +DEF_BASIC_BLOCK_FLAG(MODIFIED, 12) + +/* A general visited flag for passes to use. */ +DEF_BASIC_BLOCK_FLAG(VISITED, 13) + +/* Set on blocks that are in a transaction. This is calculated on + demand, and is available after calling compute_transaction_bits(). */ +DEF_BASIC_BLOCK_FLAG(IN_TRANSACTION, 14) + +#endif + +#ifdef DEF_EDGE_FLAG + +/* Masks for edge.flags. + + The format of this file is: DEF_EDGE_FLAG(NAME, IDX, STRING). + NAME is the name of the edge flag. A flag EDGE_#NAME will be + created and the name is used in dump_edge_info. + IDX is a sequence number that is used to determine the value + of the flag, which is 1 << IDX). */ + +/* 'Straight line' flow. In GIMPLE and in cfglayout mode, all normal + edges are fallthru edges. In cfgrtl mode, this flag really means + that control flow falls through to the next basic block in the line. */ +DEF_EDGE_FLAG(FALLTHRU, 0) + +/* Strange flow, like a computed jump or exception handling. Usually + this means that the edge cannot be split. 
*/ +DEF_EDGE_FLAG(ABNORMAL, 1) + +/* Edge out of a basic block that ends with a CALL_INSN with abnormal + exit, like an exception or a non-local goto. + ABNORMAL_CALL edges also have ABNORMAL set. + This flag is only used for the RTL CFG. */ +DEF_EDGE_FLAG(ABNORMAL_CALL, 2) + +/* Exception edge. Exception handling edges represent possible control + transfers from a trapping instruction to an exception handler. + EH edges also have ABNORMAL set for the RTL CFG. */ +DEF_EDGE_FLAG(EH, 3) + +/* Never merge blocks via this edge. This is used for exception handling, + to prevent merging away edges to the post-landing-pad basic block. + This flag is only used for the RTL CFG. */ +DEF_EDGE_FLAG(PRESERVE, 4) + +/* Not a real edge. This is used to connect parts of the CFG that do + not halt, such as infinite loops and noreturn functions, to the + EXIT_BLOCK, so that traversing of the reverse CFG is possible. */ +DEF_EDGE_FLAG(FAKE, 5) + +/* A back edge, marked in a depth-first search of the CFG. Back edges + are hints that this edge may be part of a loop in the CFG. */ +DEF_EDGE_FLAG(DFS_BACK, 6) + +/* Edge in a part of the CFG that is an irreducible loop. */ +DEF_EDGE_FLAG(IRREDUCIBLE_LOOP, 7) + +/* Edge taken when controlling predicate is nonzero. + This is only used for the GIMPLE CFG. */ +DEF_EDGE_FLAG(TRUE_VALUE, 8) + +/* Edge taken when controlling predicate is zero. + This is only used for the GIMPLE CFG. */ +DEF_EDGE_FLAG(FALSE_VALUE, 9) + +/* Edge is executable. This is only used in GIMPLE SSA-CCP and VRP. + This is only used for the GIMPLE CFG. */ +DEF_EDGE_FLAG(EXECUTABLE, 10) + +/* Edge crosses between hot and cold sections, when we do partitioning. + This flag is only used for the RTL CFG. */ +DEF_EDGE_FLAG(CROSSING, 11) + +/* Edge from a sibcall CALL_INSN to exit. + SIBCALL edges also have ABNORMAL set. + This flag is only used for the RTL CFG. */ +DEF_EDGE_FLAG(SIBCALL, 12) + +/* Candidate for straight line flow. Only used in bb-reorder.cc. 
+ This flag is only used for the RTL CFG. */ +DEF_EDGE_FLAG(CAN_FALLTHRU, 13) + +/* Exit of a loop. This is only used in ifcvt.cc. + This flag is only used for the RTL CFG. */ +DEF_EDGE_FLAG(LOOP_EXIT, 14) + +/* Uninstrumented edge out of a GIMPLE_TRANSACTION statement. */ +DEF_EDGE_FLAG(TM_UNINSTRUMENTED, 15) + +/* Abort (over) edge out of a GIMPLE_TRANSACTION statement. */ +DEF_EDGE_FLAG(TM_ABORT, 16) + +/* An edge we should ignore. It should be entirely local to + passes. ie, it is never set on any edge upon the completion + of any pass. */ +DEF_EDGE_FLAG(IGNORE, 17) + +#endif + +/* +Local variables: +mode:c +End: +*/ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfg.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfg.h new file mode 100644 index 0000000..6c821d4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfg.h @@ -0,0 +1,186 @@ +/* Control flow graph manipulation code header file. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CFG_H +#define GCC_CFG_H + +#include "dominance.h" + +/* What sort of profiling information we have. */ +enum profile_status_d +{ + PROFILE_ABSENT, + PROFILE_GUESSED, + PROFILE_READ, + PROFILE_LAST /* Last value, used by profile streaming. */ +}; + +/* A structure to group all the per-function control flow graph data. 
+ The x_* prefixing is necessary because otherwise references to the + fields of this struct are interpreted as the defines for backward + source compatibility following the definition of this struct. */ +struct GTY(()) control_flow_graph { + /* Block pointers for the exit and entry of a function. + These are always the head and tail of the basic block list. */ + basic_block x_entry_block_ptr; + basic_block x_exit_block_ptr; + + /* Index by basic block number, get basic block struct info. */ + vec *x_basic_block_info; + + /* Number of basic blocks in this flow graph. */ + int x_n_basic_blocks; + + /* Number of edges in this flow graph. */ + int x_n_edges; + + /* The first free basic block number. */ + int x_last_basic_block; + + /* UIDs for LABEL_DECLs. */ + int last_label_uid; + + /* Mapping of labels to their associated blocks. At present + only used for the gimple CFG. */ + vec *x_label_to_block_map; + + enum profile_status_d x_profile_status; + + /* Whether the dominators and the postdominators are available. */ + enum dom_state x_dom_computed[2]; + + /* Number of basic blocks in the dominance tree. */ + unsigned x_n_bbs_in_dom_tree[2]; + + /* Maximal number of entities in the single jumptable. Used to estimate + final flowgraph size. */ + int max_jumptable_ents; + + /* Maximal count of BB in function. */ + profile_count count_max; + + /* Dynamically allocated edge/bb flags. 
*/ + int edge_flags_allocated; + int bb_flags_allocated; +}; + + +extern void init_flow (function *); +extern void free_cfg (function *); +extern basic_block alloc_block (void); +extern void link_block (basic_block, basic_block); +extern void unlink_block (basic_block); +extern void compact_blocks (void); +extern void expunge_block (basic_block); +extern edge unchecked_make_edge (basic_block, basic_block, int); +extern edge cached_make_edge (sbitmap, basic_block, basic_block, int); +extern edge make_edge (basic_block, basic_block, int); +extern edge make_single_succ_edge (basic_block, basic_block, int); +extern void remove_edge_raw (edge); +extern void redirect_edge_succ (edge, basic_block); +extern void redirect_edge_pred (edge, basic_block); +extern void clear_bb_flags (void); +extern void dump_edge_info (FILE *, edge, dump_flags_t, int); +extern void debug (edge_def &ref); +extern void debug (edge_def *ptr); +extern void alloc_aux_for_blocks (int); +extern void clear_aux_for_blocks (void); +extern void free_aux_for_blocks (void); +extern void alloc_aux_for_edge (edge, int); +extern void alloc_aux_for_edges (int); +extern void clear_aux_for_edges (void); +extern void free_aux_for_edges (void); +extern void debug_bb (basic_block); +extern basic_block debug_bb_n (int); +extern void debug_bb (basic_block, dump_flags_t); +extern basic_block debug_bb_n (int, dump_flags_t); +extern void dump_bb_info (FILE *, basic_block, int, dump_flags_t, bool, bool); +extern void brief_dump_cfg (FILE *, dump_flags_t); +extern void update_bb_profile_for_threading (basic_block, profile_count, edge); +extern void scale_bbs_frequencies_profile_count (basic_block *, int, + profile_count, profile_count); +extern void scale_bbs_frequencies (basic_block *, int, profile_probability); +extern void initialize_original_copy_tables (void); +extern void reset_original_copy_tables (void); +extern void free_original_copy_tables (void); +extern bool original_copy_tables_initialized_p (void); +extern 
void set_bb_original (basic_block, basic_block); +extern basic_block get_bb_original (basic_block); +extern void set_bb_copy (basic_block, basic_block); +extern basic_block get_bb_copy (basic_block); +void set_loop_copy (class loop *, class loop *); +class loop *get_loop_copy (class loop *); + +/* Generic RAII class to allocate a bit from storage of integer type T. + The allocated bit is accessible as mask with the single bit set + via the conversion operator to T. */ + +template +class auto_flag +{ +public: + /* static assert T is integer type of max HOST_WIDE_INT precision. */ + auto_flag (T *sptr) + { + m_sptr = sptr; + int free_bit = ffs_hwi (~*sptr); + /* If there are no unset bits... */ + if (free_bit == 0) + gcc_unreachable (); + m_flag = HOST_WIDE_INT_1U << (free_bit - 1); + /* ...or if T is signed and thus the complement is sign-extended, + check if we ran out of bits. We could spare us this bit + if we could use C++11 std::make_unsigned::type to pass + ~*sptr to ffs_hwi. */ + if (m_flag == 0) + gcc_unreachable (); + gcc_checking_assert ((*sptr & m_flag) == 0); + *sptr |= m_flag; + } + ~auto_flag () + { + gcc_checking_assert ((*m_sptr & m_flag) == m_flag); + *m_sptr &= ~m_flag; + } + operator T () const { return m_flag; } +private: + T *m_sptr; + T m_flag; +}; + +/* RAII class to allocate an edge flag for temporary use. You have + to clear the flag from all edges when you are finished using it. */ + +class auto_edge_flag : public auto_flag +{ +public: + auto_edge_flag (function *fun) + : auto_flag (&fun->cfg->edge_flags_allocated) {} +}; + +/* RAII class to allocate a bb flag for temporary use. You have + to clear the flag from all edges when you are finished using it. 
*/ +class auto_bb_flag : public auto_flag +{ +public: + auto_bb_flag (function *fun) + : auto_flag (&fun->cfg->bb_flags_allocated) {} +}; + +#endif /* GCC_CFG_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfganal.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfganal.h new file mode 100644 index 0000000..bb40239 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfganal.h @@ -0,0 +1,90 @@ +/* Control flow graph analysis header file. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +#ifndef GCC_CFGANAL_H +#define GCC_CFGANAL_H + +/* This structure maintains an edge list vector. */ +/* FIXME: Make this a vec. */ +struct edge_list +{ + int num_edges; + edge *index_to_edge; +}; + + +/* Class to compute and manage control dependences on an edge-list. 
*/ +class control_dependences +{ +public: + control_dependences (); + ~control_dependences (); + bitmap get_edges_dependent_on (int); + basic_block get_edge_src (int); + basic_block get_edge_dest (int); + +private: + void set_control_dependence_map_bit (basic_block, int); + void clear_control_dependence_bitmap (basic_block); + void find_control_dependence (int); + vec control_dependence_map; + vec > m_el; + bitmap_obstack m_bitmaps; +}; + +extern bool mark_dfs_back_edges (struct function *); +extern bool mark_dfs_back_edges (void); +extern void verify_marked_backedges (struct function *); +extern void find_unreachable_blocks (void); +extern void verify_no_unreachable_blocks (void); +struct edge_list * create_edge_list (void); +void free_edge_list (struct edge_list *); +void print_edge_list (FILE *, struct edge_list *); +void verify_edge_list (FILE *, struct edge_list *); +edge find_edge (basic_block, basic_block); +int find_edge_index (struct edge_list *, basic_block, basic_block); +extern void remove_fake_edges (void); +extern void remove_fake_exit_edges (void); +extern void add_noreturn_fake_exit_edges (void); +extern void connect_infinite_loops_to_exit (void); +extern int post_order_compute (int *, bool, bool); +extern basic_block dfs_find_deadend (basic_block); +extern void inverted_post_order_compute (vec *postorder, sbitmap *start_points = 0); +extern int pre_and_rev_post_order_compute_fn (struct function *, + int *, int *, bool); +extern int pre_and_rev_post_order_compute (int *, int *, bool); +extern int rev_post_order_and_mark_dfs_back_seme (struct function *, edge, + bitmap, bool, int *, + vec > *); +extern int dfs_enumerate_from (basic_block, int, + bool (*)(const_basic_block, const void *), + basic_block *, int, const void *); +extern void compute_dominance_frontiers (class bitmap_head *); +extern bitmap compute_idf (bitmap, class bitmap_head *); +extern void bitmap_intersection_of_succs (sbitmap, sbitmap *, basic_block); +extern void 
bitmap_intersection_of_preds (sbitmap, sbitmap *, basic_block); +extern void bitmap_union_of_succs (sbitmap, sbitmap *, basic_block); +extern void bitmap_union_of_preds (sbitmap, sbitmap *, basic_block); +extern basic_block * single_pred_before_succ_order (void); +extern edge single_incoming_edge_ignoring_loop_edges (basic_block, bool); +extern edge single_pred_edge_ignoring_loop_edges (basic_block, bool); + + +#endif /* GCC_CFGANAL_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgbuild.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgbuild.h new file mode 100644 index 0000000..85145da --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgbuild.h @@ -0,0 +1,28 @@ +/* Control flow graph building header file. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_CFGBUILD_H +#define GCC_CFGBUILD_H + +extern bool inside_basic_block_p (const rtx_insn *); +extern bool control_flow_insn_p (const rtx_insn *); +extern void rtl_make_eh_edge (sbitmap, basic_block, rtx); +extern void find_many_sub_basic_blocks (sbitmap); + +#endif /* GCC_CFGBUILD_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgcleanup.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgcleanup.h new file mode 100644 index 0000000..a6d882f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgcleanup.h @@ -0,0 +1,34 @@ +/* Control flow optimization header file. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + + +#ifndef GCC_CFGCLEANUP_H +#define GCC_CFGCLEANUP_H + +enum replace_direction { dir_none, dir_forward, dir_backward, dir_both }; + +extern int flow_find_cross_jump (basic_block, basic_block, rtx_insn **, + rtx_insn **, enum replace_direction*); +extern int flow_find_head_matching_sequence (basic_block, basic_block, + rtx_insn **, rtx_insn **, int); +extern bool delete_unreachable_blocks (void); +extern void delete_dead_jumptables (void); +extern bool cleanup_cfg (int); + +#endif /* GCC_CFGCLEANUP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgexpand.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgexpand.h new file mode 100644 index 0000000..96598ae --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgexpand.h @@ -0,0 +1,28 @@ +/* Header file for lowering trees to RTL. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_CFGEXPAND_H +#define GCC_CFGEXPAND_H + +extern tree gimple_assign_rhs_to_tree (gimple *); +extern HOST_WIDE_INT estimated_stack_frame_size (struct cgraph_node *); +extern void set_parm_rtl (tree, rtx); + + +#endif /* GCC_CFGEXPAND_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfghooks.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfghooks.h new file mode 100644 index 0000000..0982d70 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfghooks.h @@ -0,0 +1,289 @@ +/* Hooks for cfg representation specific functions. + Copyright (C) 2003-2022 Free Software Foundation, Inc. + Contributed by Sebastian Pop + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CFGHOOKS_H +#define GCC_CFGHOOKS_H + +#include "predict.h" + +/* Structure to gather statistic about profile consistency, per pass. + An array of this structure, indexed by pass static number, is allocated + in passes.cc. The structure is defined here so that different CFG modes + can do their book-keeping via CFG hooks. + + For every field[2], field[0] is the count before the pass runs, and + field[1] is the post-pass count. This allows us to monitor the effect + of each individual pass on the profile consistency. + + This structure is not supposed to be used by anything other than passes.cc + and one CFG hook per CFG mode. 
*/ +struct profile_record +{ + /* A weighted cost of the run-time of the function body. */ + double time; + /* Frequency of execution of basic blocks where sum(prob) of the block's + predecessors doesn't match reasonably probability 1. */ + double dyn_mismatched_prob_out; + /* Frequency of execution basic blocks where sum(count) of the block's + predecessors doesn't match reasonably well with the incoming frequency. */ + double dyn_mismatched_count_in; + /* The number of basic blocks where sum(prob) of the block's predecessors + doesn't match reasonably probability 1. */ + int num_mismatched_prob_out; + /* The number of basic blocks where sum(count) of the block's predecessors + doesn't match reasonably well with the incoming frequency. */ + int num_mismatched_count_in; + /* A weighted cost of the size of the function body. */ + int size; + /* True iff this pass actually was run. */ + bool run; + bool fdo; +}; + +typedef int_hash dependence_hash; + +/* Optional data for duplicate_block. */ + +class copy_bb_data +{ +public: + copy_bb_data() : dependence_map (NULL) {} + ~copy_bb_data () { delete dependence_map; } + + /* A map from the copied BBs dependence info cliques to + equivalents in the BBs duplicated to. */ + hash_map *dependence_map; +}; + +struct cfg_hooks +{ + /* Name of the corresponding ir. */ + const char *name; + + /* Debugging. */ + int (*verify_flow_info) (void); + void (*dump_bb) (FILE *, basic_block, int, dump_flags_t); + void (*dump_bb_for_graph) (pretty_printer *, basic_block); + + /* Basic CFG manipulation. */ + + /* Return new basic block. */ + basic_block (*create_basic_block) (void *head, void *end, basic_block after); + + /* Redirect edge E to the given basic block B and update underlying program + representation. Returns edge representing redirected branch (that may not + be equivalent to E in the case of duplicate edges being removed) or NULL + if edge is not easily redirectable for whatever reason. 
*/ + edge (*redirect_edge_and_branch) (edge e, basic_block b); + + /* Same as the above but allows redirecting of fallthru edges. In that case + newly created forwarder basic block is returned. The edge must + not be abnormal. */ + basic_block (*redirect_edge_and_branch_force) (edge, basic_block); + + /* Returns true if it is possible to remove the edge by redirecting it + to the destination of the other edge going from its source. */ + bool (*can_remove_branch_p) (const_edge); + + /* Remove statements corresponding to a given basic block. */ + void (*delete_basic_block) (basic_block); + + /* Creates a new basic block just after basic block B by splitting + everything after specified instruction I. */ + basic_block (*split_block) (basic_block b, void * i); + + /* Move block B immediately after block A. */ + bool (*move_block_after) (basic_block b, basic_block a); + + /* Return true when blocks A and B can be merged into single basic block. */ + bool (*can_merge_blocks_p) (basic_block a, basic_block b); + + /* Merge blocks A and B. */ + void (*merge_blocks) (basic_block a, basic_block b); + + /* Predict edge E using PREDICTOR to given PROBABILITY. */ + void (*predict_edge) (edge e, enum br_predictor predictor, int probability); + + /* Return true if the one of outgoing edges is already predicted by + PREDICTOR. */ + bool (*predicted_by_p) (const_basic_block bb, enum br_predictor predictor); + + /* Return true when block A can be duplicated. */ + bool (*can_duplicate_block_p) (const_basic_block a); + + /* Duplicate block A. */ + basic_block (*duplicate_block) (basic_block a, copy_bb_data *); + + /* Higher level functions representable by primitive operations above if + we didn't have some oddities in RTL and Tree representations. */ + basic_block (*split_edge) (edge); + void (*make_forwarder_block) (edge); + + /* Try to make the edge fallthru. */ + void (*tidy_fallthru_edge) (edge); + + /* Make the edge non-fallthru. 
*/ + basic_block (*force_nonfallthru) (edge); + + /* Say whether a block ends with a call, possibly followed by some + other code that must stay with the call. */ + bool (*block_ends_with_call_p) (basic_block); + + /* Say whether a block ends with a conditional branch. Switches + and unconditional branches do not qualify. */ + bool (*block_ends_with_condjump_p) (const_basic_block); + + /* Add fake edges to the function exit for any non constant and non noreturn + calls, volatile inline assembly in the bitmap of blocks specified by + BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks + that were split. + + The goal is to expose cases in which entering a basic block does not imply + that all subsequent instructions must be executed. */ + int (*flow_call_edges_add) (sbitmap); + + /* This function is called immediately after edge E is added to the + edge vector E->dest->preds. */ + void (*execute_on_growing_pred) (edge); + + /* This function is called immediately before edge E is removed from + the edge vector E->dest->preds. */ + void (*execute_on_shrinking_pred) (edge); + + /* A hook for duplicating loop in CFG, currently this is used + in loop versioning. */ + bool (*cfg_hook_duplicate_loop_body_to_header_edge) (class loop *, edge, + unsigned, sbitmap, edge, + vec *, int); + + /* Add condition to new basic block and update CFG used in loop + versioning. */ + void (*lv_add_condition_to_bb) (basic_block, basic_block, basic_block, + void *); + /* Update the PHI nodes in case of loop versioning. */ + void (*lv_adjust_loop_header_phi) (basic_block, basic_block, + basic_block, edge); + + /* Given a condition BB extract the true/false taken/not taken edges + (depending if we are on tree's or RTL). */ + void (*extract_cond_bb_edges) (basic_block, edge *, edge *); + + + /* Add PHI arguments queued in PENDINT_STMT list on edge E to edge + E->dest (only in tree-ssa loop versioning. 
*/ + void (*flush_pending_stmts) (edge); + + /* True if a block contains no executable instructions. */ + bool (*empty_block_p) (basic_block); + + /* Split a basic block if it ends with a conditional branch and if + the other part of the block is not empty. */ + basic_block (*split_block_before_cond_jump) (basic_block); + + /* Do book-keeping of a basic block for the profile consistency checker. */ + void (*account_profile_record) (basic_block, struct profile_record *); +}; + +extern void verify_flow_info (void); + +/* Check control flow invariants, if internal consistency checks are + enabled. */ + +static inline void +checking_verify_flow_info (void) +{ + /* TODO: Add a separate option for -fchecking=cfg. */ + if (flag_checking) + verify_flow_info (); +} + +extern void dump_bb (FILE *, basic_block, int, dump_flags_t); +extern void dump_bb_for_graph (pretty_printer *, basic_block); +extern void dump_flow_info (FILE *, dump_flags_t); + +extern edge redirect_edge_and_branch (edge, basic_block); +extern basic_block redirect_edge_and_branch_force (edge, basic_block); +extern edge redirect_edge_succ_nodup (edge, basic_block); +extern bool can_remove_branch_p (const_edge); +extern void remove_branch (edge); +extern void remove_edge (edge); +extern edge split_block (basic_block, rtx); +extern edge split_block (basic_block, gimple *); +extern edge split_block_after_labels (basic_block); +extern bool move_block_after (basic_block, basic_block); +extern void delete_basic_block (basic_block); +extern basic_block split_edge (edge); +extern basic_block create_basic_block (rtx, rtx, basic_block); +extern basic_block create_basic_block (gimple_seq, basic_block); +extern basic_block create_empty_bb (basic_block); +extern bool can_merge_blocks_p (basic_block, basic_block); +extern void merge_blocks (basic_block, basic_block); +extern edge make_forwarder_block (basic_block, bool (*)(edge), + void (*) (basic_block)); +extern basic_block force_nonfallthru (edge); +extern void 
tidy_fallthru_edge (edge); +extern void tidy_fallthru_edges (void); +extern void predict_edge (edge e, enum br_predictor predictor, int probability); +extern bool predicted_by_p (const_basic_block bb, enum br_predictor predictor); +extern bool can_duplicate_block_p (const_basic_block); +extern basic_block duplicate_block (basic_block, edge, basic_block, + copy_bb_data * = NULL); +extern bool block_ends_with_call_p (basic_block bb); +extern bool empty_block_p (basic_block); +extern basic_block split_block_before_cond_jump (basic_block); +extern bool block_ends_with_condjump_p (const_basic_block bb); +extern int flow_call_edges_add (sbitmap); +extern void execute_on_growing_pred (edge); +extern void execute_on_shrinking_pred (edge); +extern bool +cfg_hook_duplicate_loop_body_to_header_edge (class loop *loop, edge, + unsigned int ndupl, + sbitmap wont_exit, edge orig, + vec *to_remove, int flags); + +extern void lv_flush_pending_stmts (edge); +extern void extract_cond_bb_edges (basic_block, edge *, edge*); +extern void lv_adjust_loop_header_phi (basic_block, basic_block, basic_block, + edge); +extern void lv_add_condition_to_bb (basic_block, basic_block, basic_block, + void *); + +extern bool can_copy_bbs_p (basic_block *, unsigned); +extern void copy_bbs (basic_block *, unsigned, basic_block *, + edge *, unsigned, edge *, class loop *, + basic_block, bool); + +void profile_record_check_consistency (profile_record *); +void profile_record_account_profile (profile_record *); + +/* Hooks containers. */ +extern struct cfg_hooks gimple_cfg_hooks; +extern struct cfg_hooks rtl_cfg_hooks; +extern struct cfg_hooks cfg_layout_rtl_cfg_hooks; + +/* Declarations. 
*/ +extern enum ir_type current_ir_type (void); +extern void rtl_register_cfg_hooks (void); +extern void cfg_layout_rtl_register_cfg_hooks (void); +extern void gimple_register_cfg_hooks (void); +extern struct cfg_hooks get_cfg_hooks (void); +extern void set_cfg_hooks (struct cfg_hooks); + +#endif /* GCC_CFGHOOKS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgloop.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgloop.h new file mode 100644 index 0000000..d2714e2 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgloop.h @@ -0,0 +1,936 @@ +/* Natural loop functions + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CFGLOOP_H +#define GCC_CFGLOOP_H + +#include "cfgloopmanip.h" + +/* Structure to hold decision about unrolling/peeling. */ +enum lpt_dec +{ + LPT_NONE, + LPT_UNROLL_CONSTANT, + LPT_UNROLL_RUNTIME, + LPT_UNROLL_STUPID +}; + +struct GTY (()) lpt_decision { + enum lpt_dec decision; + unsigned times; +}; + +/* The type of extend applied to an IV. */ +enum iv_extend_code +{ + IV_SIGN_EXTEND, + IV_ZERO_EXTEND, + IV_UNKNOWN_EXTEND +}; + +/* The structure describing a bound on number of iterations of a loop. */ + +class GTY ((chain_next ("%h.next"))) nb_iter_bound { +public: + /* The statement STMT is executed at most ... */ + gimple *stmt; + + /* ... 
BOUND + 1 times (BOUND must be an unsigned constant). + The + 1 is added for the following reasons: + + a) 0 would otherwise be unused, while we would need to care more about + overflows (as MAX + 1 is sometimes produced as the estimate on number + of executions of STMT). + b) it is consistent with the result of number_of_iterations_exit. */ + widest_int bound; + + /* True if, after executing the statement BOUND + 1 times, we will + leave the loop; that is, all the statements after it are executed at most + BOUND times. */ + bool is_exit; + + /* The next bound in the list. */ + class nb_iter_bound *next; +}; + +/* Description of the loop exit. */ + +struct GTY ((for_user)) loop_exit { + /* The exit edge. */ + edge e; + + /* Previous and next exit in the list of the exits of the loop. */ + struct loop_exit *prev; + struct loop_exit *next; + + /* Next element in the list of loops from that E exits. */ + struct loop_exit *next_e; +}; + +struct loop_exit_hasher : ggc_ptr_hash +{ + typedef edge compare_type; + + static hashval_t hash (loop_exit *); + static bool equal (loop_exit *, edge); + static void remove (loop_exit *); +}; + +typedef class loop *loop_p; + +/* An integer estimation of the number of iterations. Estimate_state + describes what is the state of the estimation. */ +enum loop_estimation +{ + /* Estimate was not computed yet. */ + EST_NOT_COMPUTED, + /* Estimate is ready. */ + EST_AVAILABLE, + EST_LAST +}; + +/* The structure describing non-overflow control induction variable for + loop's exit edge. */ +struct GTY ((chain_next ("%h.next"))) control_iv { + tree base; + tree step; + struct control_iv *next; +}; + +/* Structure to hold information for each natural loop. */ +class GTY ((chain_next ("%h.next"))) loop { +public: + /* Index into loops array. Note indices will never be reused after loop + is destroyed. */ + int num; + + /* Number of loop insns. */ + unsigned ninsns; + + /* Basic block of loop header. 
*/ + basic_block header; + + /* Basic block of loop latch. */ + basic_block latch; + + /* For loop unrolling/peeling decision. */ + struct lpt_decision lpt_decision; + + /* Average number of executed insns per iteration. */ + unsigned av_ninsns; + + /* Number of blocks contained within the loop. */ + unsigned num_nodes; + + /* Superloops of the loop, starting with the outermost loop. */ + vec *superloops; + + /* The first inner (child) loop or NULL if innermost loop. */ + class loop *inner; + + /* Link to the next (sibling) loop. */ + class loop *next; + + /* Auxiliary info specific to a pass. */ + PTR GTY ((skip (""))) aux; + + /* The number of times the latch of the loop is executed. This can be an + INTEGER_CST, or a symbolic expression representing the number of + iterations like "N - 1", or a COND_EXPR containing the runtime + conditions under which the number of iterations is non zero. + + Don't access this field directly: number_of_latch_executions + computes and caches the computed information in this field. */ + tree nb_iterations; + + /* An integer guaranteed to be greater or equal to nb_iterations. Only + valid if any_upper_bound is true. */ + widest_int nb_iterations_upper_bound; + + widest_int nb_iterations_likely_upper_bound; + + /* An integer giving an estimate on nb_iterations. Unlike + nb_iterations_upper_bound, there is no guarantee that it is at least + nb_iterations. */ + widest_int nb_iterations_estimate; + + /* If > 0, an integer, where the user asserted that for any + I in [ 0, nb_iterations ) and for any J in + [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations + of the loop can be safely evaluated concurrently. */ + int safelen; + + /* Preferred vectorization factor for the loop if non-zero. */ + int simdlen; + + /* Constraints are generally set by consumers and affect certain + semantics of niter analyzer APIs. Currently the APIs affected are + number_of_iterations_exit* functions and their callers. 
One typical + use case of constraints is to vectorize possibly infinite loop: + + 1) Compute niter->assumptions by calling niter analyzer API and + record it as possible condition for loop versioning. + 2) Clear buffered result of niter/scev analyzer. + 3) Set constraint LOOP_C_FINITE assuming the loop is finite. + 4) Analyze data references. Since data reference analysis depends + on niter/scev analyzer, the point is that niter/scev analysis + is done under circumstance of LOOP_C_FINITE constraint. + 5) Version the loop with niter->assumptions computed in step 1). + 6) Vectorize the versioned loop in which niter->assumptions is + checked to be true. + 7) Update constraints in versioned loops so that niter analyzer + in following passes can use it. + + Note consumers are usually the loop optimizers and it is consumers' + responsibility to set/clear constraints correctly. Failing to do + that might result in hard to track down bugs in niter/scev consumers. */ + unsigned constraints; + + /* An integer estimation of the number of iterations. Estimate_state + describes what is the state of the estimation. */ + ENUM_BITFIELD(loop_estimation) estimate_state : 8; + + unsigned any_upper_bound : 1; + unsigned any_estimate : 1; + unsigned any_likely_upper_bound : 1; + + /* True if the loop can be parallel. */ + unsigned can_be_parallel : 1; + + /* True if -Waggressive-loop-optimizations warned about this loop + already. */ + unsigned warned_aggressive_loop_optimizations : 1; + + /* True if this loop should never be vectorized. */ + unsigned dont_vectorize : 1; + + /* True if we should try harder to vectorize this loop. */ + unsigned force_vectorize : 1; + + /* True if the loop is part of an oacc kernels region. */ + unsigned in_oacc_kernels_region : 1; + + /* True if the loop is known to be finite. This is a localized + flag_finite_loops or similar pragmas state. */ + unsigned finite_p : 1; + + /* The number of times to unroll the loop. 
0 means no information given, + just do what we always do. A value of 1 means do not unroll the loop. + A value of USHRT_MAX means unroll with no specific unrolling factor. + Other values means unroll with the given unrolling factor. */ + unsigned short unroll; + + /* If this loop was inlined the main clique of the callee which does + not need remapping when copying the loop body. */ + unsigned short owned_clique; + + /* For SIMD loops, this is a unique identifier of the loop, referenced + by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE + builtins. */ + tree simduid; + + /* In loop optimization, it's common to generate loops from the original + loop. This field records the index of the original loop which can be + used to track the original loop from newly generated loops. This can + be done by calling function get_loop (cfun, orig_loop_num). Note the + original loop could be destroyed for various reasons thus no longer + exists, as a result, function call to get_loop returns NULL pointer. + In this case, this field should not be used and needs to be cleared + whenever possible. */ + int orig_loop_num; + + /* Upper bound on number of iterations of a loop. */ + class nb_iter_bound *bounds; + + /* Non-overflow control ivs of a loop. */ + struct control_iv *control_ivs; + + /* Head of the cyclic list of the exits of the loop. */ + struct loop_exit *exits; + + /* Number of iteration analysis data for RTL. */ + class niter_desc *simple_loop_desc; + + /* For sanity checking during loop fixup we record here the former + loop header for loops marked for removal. Note that this prevents + the basic-block from being collected but its index can still be + reused. */ + basic_block former_header; +}; + +/* Set if the loop is known to be infinite. */ +#define LOOP_C_INFINITE (1 << 0) +/* Set if the loop is known to be finite without any assumptions. */ +#define LOOP_C_FINITE (1 << 1) + +/* Set C to the LOOP constraint. 
*/ +static inline void +loop_constraint_set (class loop *loop, unsigned c) +{ + loop->constraints |= c; +} + +/* Clear C from the LOOP constraint. */ +static inline void +loop_constraint_clear (class loop *loop, unsigned c) +{ + loop->constraints &= ~c; +} + +/* Check if C is set in the LOOP constraint. */ +static inline bool +loop_constraint_set_p (class loop *loop, unsigned c) +{ + return (loop->constraints & c) == c; +} + +/* Flags for state of loop structure. */ +enum +{ + LOOPS_HAVE_PREHEADERS = 1, + LOOPS_HAVE_SIMPLE_LATCHES = 2, + LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4, + LOOPS_HAVE_RECORDED_EXITS = 8, + LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16, + LOOP_CLOSED_SSA = 32, + LOOPS_NEED_FIXUP = 64, + LOOPS_HAVE_FALLTHRU_PREHEADERS = 128 +}; + +#define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \ + | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS) +#define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES) + +/* Structure to hold CFG information about natural loops within a function. */ +struct GTY (()) loops { + /* State of loops. */ + int state; + + /* Array of the loops. */ + vec *larray; + + /* Maps edges to the list of their descriptions as loop exits. Edges + whose sources or destinations have loop_father == NULL (which may + happen during the cfg manipulations) should not appear in EXITS. */ + hash_table *GTY(()) exits; + + /* Pointer to root of loop hierarchy tree. */ + class loop *tree_root; +}; + +/* Loop recognition. 
*/ +bool bb_loop_header_p (basic_block); +void init_loops_structure (struct function *, struct loops *, unsigned); +extern struct loops *flow_loops_find (struct loops *); +extern void disambiguate_loops_with_multiple_latches (void); +extern void flow_loops_free (struct loops *); +extern void flow_loops_dump (FILE *, + void (*)(const class loop *, FILE *, int), int); +extern void flow_loop_dump (const class loop *, FILE *, + void (*)(const class loop *, FILE *, int), int); +class loop *alloc_loop (void); +extern void flow_loop_free (class loop *); +int flow_loop_nodes_find (basic_block, class loop *); +unsigned fix_loop_structure (bitmap changed_bbs); +bool mark_irreducible_loops (void); +void release_recorded_exits (function *); +void record_loop_exits (void); +void rescan_loop_exit (edge, bool, bool); +void sort_sibling_loops (function *); + +/* Loop data structure manipulation/querying. */ +extern void flow_loop_tree_node_add (class loop *, class loop *, + class loop * = NULL); +extern void flow_loop_tree_node_remove (class loop *); +extern bool flow_loop_nested_p (const class loop *, const class loop *); +extern bool flow_bb_inside_loop_p (const class loop *, const_basic_block); +extern class loop * find_common_loop (class loop *, class loop *); +class loop *superloop_at_depth (class loop *, unsigned); +struct eni_weights; +extern int num_loop_insns (const class loop *); +extern int average_num_loop_insns (const class loop *); +extern unsigned get_loop_level (const class loop *); +extern bool loop_exit_edge_p (const class loop *, const_edge); +extern bool loop_exits_to_bb_p (class loop *, basic_block); +extern bool loop_exits_from_bb_p (class loop *, basic_block); +extern void mark_loop_exit_edges (void); +extern dump_user_location_t get_loop_location (class loop *loop); + +/* Loops & cfg manipulation. 
*/ +extern basic_block *get_loop_body (const class loop *); +extern unsigned get_loop_body_with_size (const class loop *, basic_block *, + unsigned); +extern basic_block *get_loop_body_in_dom_order (const class loop *); +extern basic_block *get_loop_body_in_bfs_order (const class loop *); +extern basic_block *get_loop_body_in_custom_order (const class loop *, + int (*) (const void *, const void *)); +extern basic_block *get_loop_body_in_custom_order (const class loop *, void *, + int (*) (const void *, const void *, void *)); + +extern auto_vec get_loop_exit_edges (const class loop *, basic_block * = NULL); +extern edge single_exit (const class loop *); +extern edge single_likely_exit (class loop *loop, const vec &); +extern unsigned num_loop_branches (const class loop *); + +extern edge loop_preheader_edge (const class loop *); +extern edge loop_latch_edge (const class loop *); + +extern void add_bb_to_loop (basic_block, class loop *); +extern void remove_bb_from_loops (basic_block); + +extern void cancel_loop_tree (class loop *); +extern void delete_loop (class loop *); + + +extern void verify_loop_structure (void); + +/* Loop analysis. */ +extern bool just_once_each_iteration_p (const class loop *, const_basic_block); +gcov_type expected_loop_iterations_unbounded (const class loop *, + bool *read_profile_p = NULL, bool by_profile_only = false); +extern unsigned expected_loop_iterations (class loop *); +extern rtx doloop_condition_get (rtx_insn *); + +void mark_loop_for_removal (loop_p); + +/* Induction variable analysis. */ + +/* The description of induction variable. The things are a bit complicated + due to need to handle subregs and extends. The value of the object described + by it can be obtained as follows (all computations are done in extend_mode): + + Value in i-th iteration is + delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)). 
+ + If first_special is true, the value in the first iteration is + delta + mult * base + + If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is + subreg_{mode} (base + i * step) + + The get_iv_value function can be used to obtain these expressions. + + ??? Add a third mode field that would specify the mode in that inner + computation is done, which would enable it to be different from the + outer one? */ + +class rtx_iv +{ +public: + /* Its base and step (mode of base and step is supposed to be extend_mode, + see the description above). */ + rtx base, step; + + /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND, + or IV_UNKNOWN_EXTEND). */ + enum iv_extend_code extend; + + /* Operations applied in the extended mode. */ + rtx delta, mult; + + /* The mode it is extended to. */ + scalar_int_mode extend_mode; + + /* The mode the variable iterates in. */ + scalar_int_mode mode; + + /* Whether the first iteration needs to be handled specially. */ + unsigned first_special : 1; +}; + +/* The description of an exit from the loop and of the number of iterations + till we take the exit. */ + +class GTY(()) niter_desc +{ +public: + /* The edge out of the loop. */ + edge out_edge; + + /* The other edge leading from the condition. */ + edge in_edge; + + /* True if we are able to say anything about number of iterations of the + loop. */ + bool simple_p; + + /* True if the loop iterates the constant number of times. */ + bool const_iter; + + /* Number of iterations if constant. */ + uint64_t niter; + + /* Assumptions under that the rest of the information is valid. */ + rtx assumptions; + + /* Assumptions under that the loop ends before reaching the latch, + even if value of niter_expr says otherwise. */ + rtx noloop_assumptions; + + /* Condition under that the loop is infinite. */ + rtx infinite; + + /* Whether the comparison is signed. */ + bool signed_p; + + /* The mode in that niter_expr should be computed. 
*/ + scalar_int_mode mode; + + /* The number of iterations of the loop. */ + rtx niter_expr; +}; + +extern void iv_analysis_loop_init (class loop *); +extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *); +extern bool iv_analyze_result (rtx_insn *, rtx, class rtx_iv *); +extern bool iv_analyze_expr (rtx_insn *, scalar_int_mode, rtx, + class rtx_iv *); +extern rtx get_iv_value (class rtx_iv *, rtx); +extern bool biv_p (rtx_insn *, scalar_int_mode, rtx); +extern void iv_analysis_done (void); + +extern class niter_desc *get_simple_loop_desc (class loop *loop); +extern void free_simple_loop_desc (class loop *loop); + +static inline class niter_desc * +simple_loop_desc (class loop *loop) +{ + return loop->simple_loop_desc; +} + +/* Accessors for the loop structures. */ + +/* Returns the loop with index NUM from FNs loop tree. */ + +static inline class loop * +get_loop (struct function *fn, unsigned num) +{ + return (*loops_for_fn (fn)->larray)[num]; +} + +/* Returns the number of superloops of LOOP. */ + +static inline unsigned +loop_depth (const class loop *loop) +{ + return vec_safe_length (loop->superloops); +} + +/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost + loop. */ + +static inline class loop * +loop_outer (const class loop *loop) +{ + unsigned n = vec_safe_length (loop->superloops); + + if (n == 0) + return NULL; + + return (*loop->superloops)[n - 1]; +} + +/* Returns true if LOOP has at least one exit edge. */ + +static inline bool +loop_has_exit_edges (const class loop *loop) +{ + return loop->exits->next->e != NULL; +} + +/* Returns the list of loops in FN. */ + +inline vec * +get_loops (struct function *fn) +{ + struct loops *loops = loops_for_fn (fn); + if (!loops) + return NULL; + + return loops->larray; +} + +/* Returns the number of loops in FN (including the removed + ones and the fake loop that forms the root of the loop tree). 
*/ + +static inline unsigned +number_of_loops (struct function *fn) +{ + struct loops *loops = loops_for_fn (fn); + if (!loops) + return 0; + + return vec_safe_length (loops->larray); +} + +/* Returns true if state of the loops satisfies all properties + described by FLAGS. */ + +static inline bool +loops_state_satisfies_p (function *fn, unsigned flags) +{ + return (loops_for_fn (fn)->state & flags) == flags; +} + +static inline bool +loops_state_satisfies_p (unsigned flags) +{ + return loops_state_satisfies_p (cfun, flags); +} + +/* Sets FLAGS to the loops state. */ + +static inline void +loops_state_set (function *fn, unsigned flags) +{ + loops_for_fn (fn)->state |= flags; +} + +static inline void +loops_state_set (unsigned flags) +{ + loops_state_set (cfun, flags); +} + +/* Clears FLAGS from the loops state. */ + +static inline void +loops_state_clear (function *fn, unsigned flags) +{ + loops_for_fn (fn)->state &= ~flags; +} + +static inline void +loops_state_clear (unsigned flags) +{ + if (!current_loops) + return; + loops_state_clear (cfun, flags); +} + +/* Check loop structure invariants, if internal consistency checks are + enabled. */ + +static inline void +checking_verify_loop_structure (void) +{ + /* VERIFY_LOOP_STRUCTURE essentially asserts that no loops need fixups. + + The loop optimizers should never make changes to the CFG which + require loop fixups. But the low level CFG manipulation code may + set the flag conservatively. + + Go ahead and clear the flag here. That avoids the assert inside + VERIFY_LOOP_STRUCTURE, and if there is an inconsistency in the loop + structures VERIFY_LOOP_STRUCTURE will detect it. + + This also avoid the compile time cost of excessive fixups. */ + loops_state_clear (LOOPS_NEED_FIXUP); + if (flag_checking) + verify_loop_structure (); +} + +/* Loop iterators. */ + +/* Flags for loop iteration. */ + +enum li_flags +{ + LI_INCLUDE_ROOT = 1, /* Include the fake root of the loop tree. 
*/ + LI_FROM_INNERMOST = 2, /* Iterate over the loops in the reverse order, + starting from innermost ones. */ + LI_ONLY_INNERMOST = 4 /* Iterate only over innermost loops. */ +}; + +/* Provide the functionality of std::as_const to support range-based for + to use const iterator. (We can't use std::as_const itself because it's + a C++17 feature.) */ +template +constexpr const T & +as_const (T &t) +{ + return t; +} + +/* A list for visiting loops, which contains the loop numbers instead of + the loop pointers. If the loop ROOT is offered (non-null), the visiting + will start from it, otherwise it would start from the tree_root of + loops_for_fn (FN) instead. The scope is restricted in function FN and + the visiting order is specified by FLAGS. */ + +class loops_list +{ +public: + loops_list (function *fn, unsigned flags, class loop *root = nullptr); + + template class Iter + { + public: + Iter (const loops_list &l, unsigned idx) : list (l), curr_idx (idx) + { + fill_curr_loop (); + } + + T operator* () const { return curr_loop; } + + Iter & + operator++ () + { + if (curr_idx < list.to_visit.length ()) + { + /* Bump the index and fill a new one. */ + curr_idx++; + fill_curr_loop (); + } + else + gcc_assert (!curr_loop); + + return *this; + } + + bool + operator!= (const Iter &rhs) const + { + return this->curr_idx != rhs.curr_idx; + } + + private: + /* Fill the current loop starting from the current index. */ + void fill_curr_loop (); + + /* Reference to the loop list to visit. */ + const loops_list &list; + + /* The current index in the list to visit. */ + unsigned curr_idx; + + /* The loop implied by the current index. 
*/ + class loop *curr_loop; + }; + + using iterator = Iter; + using const_iterator = Iter; + + iterator + begin () + { + return iterator (*this, 0); + } + + iterator + end () + { + return iterator (*this, to_visit.length ()); + } + + const_iterator + begin () const + { + return const_iterator (*this, 0); + } + + const_iterator + end () const + { + return const_iterator (*this, to_visit.length ()); + } + +private: + /* Walk loop tree starting from ROOT as the visiting order specified + by FLAGS. */ + void walk_loop_tree (class loop *root, unsigned flags); + + /* The function we are visiting. */ + function *fn; + + /* The list of loops to visit. */ + auto_vec to_visit; +}; + +/* Starting from current index CURR_IDX (inclusive), find one index + which stands for one valid loop and fill the found loop as CURR_LOOP, + if we can't find one, set CURR_LOOP as null. */ + +template +inline void +loops_list::Iter::fill_curr_loop () +{ + int anum; + + while (this->list.to_visit.iterate (this->curr_idx, &anum)) + { + class loop *loop = get_loop (this->list.fn, anum); + if (loop) + { + curr_loop = loop; + return; + } + this->curr_idx++; + } + + curr_loop = nullptr; +} + +/* Set up the loops list to visit according to the specified + function scope FN and iterating order FLAGS. If ROOT is + not null, the visiting would start from it, otherwise it + will start from tree_root of loops_for_fn (FN). */ + +inline loops_list::loops_list (function *fn, unsigned flags, class loop *root) +{ + struct loops *loops = loops_for_fn (fn); + gcc_assert (!root || loops); + + /* Check mutually exclusive flags should not co-exist. */ + unsigned checked_flags = LI_ONLY_INNERMOST | LI_FROM_INNERMOST; + gcc_assert ((flags & checked_flags) != checked_flags); + + this->fn = fn; + if (!loops) + return; + + class loop *tree_root = root ? 
root : loops->tree_root; + + this->to_visit.reserve_exact (number_of_loops (fn)); + + /* When root is tree_root of loops_for_fn (fn) and the visiting + order is LI_ONLY_INNERMOST, we would like to use linear + search here since it has a more stable bound than the + walk_loop_tree. */ + if (flags & LI_ONLY_INNERMOST && tree_root == loops->tree_root) + { + gcc_assert (tree_root->num == 0); + if (tree_root->inner == NULL) + { + if (flags & LI_INCLUDE_ROOT) + this->to_visit.quick_push (0); + + return; + } + + class loop *aloop; + unsigned int i; + for (i = 1; vec_safe_iterate (loops->larray, i, &aloop); i++) + if (aloop != NULL && aloop->inner == NULL) + this->to_visit.quick_push (aloop->num); + } + else + walk_loop_tree (tree_root, flags); +} + +/* The properties of the target. */ +struct target_cfgloop { + /* Number of available registers. */ + unsigned x_target_avail_regs; + + /* Number of available registers that are call-clobbered. */ + unsigned x_target_clobbered_regs; + + /* Number of registers reserved for temporary expressions. */ + unsigned x_target_res_regs; + + /* The cost for register when there still is some reserve, but we are + approaching the number of available registers. */ + unsigned x_target_reg_cost[2]; + + /* The cost for register when we need to spill. 
*/ + unsigned x_target_spill_cost[2]; +}; + +extern struct target_cfgloop default_target_cfgloop; +#if SWITCHABLE_TARGET +extern struct target_cfgloop *this_target_cfgloop; +#else +#define this_target_cfgloop (&default_target_cfgloop) +#endif + +#define target_avail_regs \ + (this_target_cfgloop->x_target_avail_regs) +#define target_clobbered_regs \ + (this_target_cfgloop->x_target_clobbered_regs) +#define target_res_regs \ + (this_target_cfgloop->x_target_res_regs) +#define target_reg_cost \ + (this_target_cfgloop->x_target_reg_cost) +#define target_spill_cost \ + (this_target_cfgloop->x_target_spill_cost) + +/* Register pressure estimation for induction variable optimizations & loop + invariant motion. */ +extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool); +extern void init_set_costs (void); + +/* Loop optimizer initialization. */ +extern void loop_optimizer_init (unsigned); +extern void loop_optimizer_finalize (function *, bool = false); +inline void +loop_optimizer_finalize () +{ + loop_optimizer_finalize (cfun); +} + +/* Optimization passes. */ +enum +{ + UAP_UNROLL = 1, /* Enables unrolling of loops if it seems profitable. */ + UAP_UNROLL_ALL = 2 /* Enables unrolling of all loops. 
*/ +}; + +extern void doloop_optimize_loops (void); +extern void move_loop_invariants (void); +extern auto_vec get_loop_hot_path (const class loop *loop); + +/* Returns the outermost loop of the loop nest that contains LOOP.*/ +static inline class loop * +loop_outermost (class loop *loop) +{ + unsigned n = vec_safe_length (loop->superloops); + + if (n <= 1) + return loop; + + return (*loop->superloops)[1]; +} + +extern void record_niter_bound (class loop *, const widest_int &, bool, bool); +extern HOST_WIDE_INT get_estimated_loop_iterations_int (class loop *); +extern HOST_WIDE_INT get_max_loop_iterations_int (const class loop *); +extern HOST_WIDE_INT get_likely_max_loop_iterations_int (class loop *); +extern bool get_estimated_loop_iterations (class loop *loop, widest_int *nit); +extern bool get_max_loop_iterations (const class loop *loop, widest_int *nit); +extern bool get_likely_max_loop_iterations (class loop *loop, widest_int *nit); +extern int bb_loop_depth (const_basic_block); + +/* Converts VAL to widest_int. */ + +static inline widest_int +gcov_type_to_wide_int (gcov_type val) +{ + HOST_WIDE_INT a[2]; + + a[0] = (unsigned HOST_WIDE_INT) val; + /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by + the size of type. */ + val >>= HOST_BITS_PER_WIDE_INT - 1; + val >>= 1; + a[1] = (unsigned HOST_WIDE_INT) val; + + return widest_int::from_array (a, 2); +} +#endif /* GCC_CFGLOOP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgloopmanip.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgloopmanip.h new file mode 100644 index 0000000..17c9a1b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgloopmanip.h @@ -0,0 +1,63 @@ +/* Loop manipulation header. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CFGLOOPMANIP_H +#define GCC_CFGLOOPMANIP_H + +enum +{ + CP_SIMPLE_PREHEADERS = 1, + CP_FALLTHRU_PREHEADERS = 2 +}; + +#define DLTHE_FLAG_UPDATE_FREQ 1 /* Update frequencies in + duplicate_loop_to_header_edge. */ +#define DLTHE_RECORD_COPY_NUMBER 2 /* Record copy number in the aux + field of newly create BB. */ +#define DLTHE_FLAG_COMPLETTE_PEEL 4 /* Update frequencies expecting + a complete peeling. */ +extern edge mfb_kj_edge; + +extern bool remove_path (edge, bool * = NULL, bitmap = NULL); +extern void place_new_loop (struct function *, class loop *); +extern void add_loop (class loop *, class loop *); +extern void scale_loop_frequencies (class loop *, profile_probability); +extern void scale_loop_profile (class loop *, profile_probability, gcov_type); +extern edge create_empty_if_region_on_edge (edge, tree); +extern class loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree, + tree *, tree *, class loop *); +extern void unloop (class loop *, bool *, bitmap); +extern void copy_loop_info (class loop *loop, class loop *target); +extern class loop * duplicate_loop (class loop *, class loop *, + class loop * = NULL); +extern void duplicate_subloops (class loop *, class loop *); +extern bool can_duplicate_loop_p (const class loop *loop); +extern bool +duplicate_loop_body_to_header_edge (class loop *, edge, unsigned, sbitmap, edge, + vec *, int); +extern bool mfb_keep_just 
(edge); +basic_block create_preheader (class loop *, int); +extern void create_preheaders (int); +extern void force_single_succ_latches (void); +class loop * loop_version (class loop *, void *, + basic_block *, + profile_probability, profile_probability, + profile_probability, profile_probability, bool); + +#endif /* GCC_CFGLOOPMANIP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgrtl.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgrtl.h new file mode 100644 index 0000000..7deaa4f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cfgrtl.h @@ -0,0 +1,61 @@ +/* Define control flow data structures for the CFG. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_CFGRTL_H +#define GCC_CFGRTL_H + +extern void delete_insn (rtx_insn *); +extern bool delete_insn_and_edges (rtx_insn *); +extern void delete_insn_chain (rtx, rtx_insn *, bool); +extern basic_block create_basic_block_structure (rtx_insn *, rtx_insn *, + rtx_note *, basic_block); +extern void compute_bb_for_insn (void); +extern unsigned int free_bb_for_insn (void); +extern rtx_insn *entry_of_function (void); +extern void update_bb_for_insn (basic_block); +extern bool contains_no_active_insn_p (const_basic_block); +extern bool forwarder_block_p (const_basic_block); +extern bool can_fallthru (basic_block, basic_block); +extern rtx_note *bb_note (basic_block); +extern rtx_code_label *block_label (basic_block); +extern edge try_redirect_by_replacing_jump (edge, basic_block, bool); +extern void emit_barrier_after_bb (basic_block bb); +extern basic_block force_nonfallthru_and_redirect (edge, basic_block, rtx); +extern void insert_insn_on_edge (rtx, edge); +extern void commit_one_edge_insertion (edge e); +extern void commit_edge_insertions (void); +extern void print_rtl_with_bb (FILE *, const rtx_insn *, dump_flags_t); +extern void update_br_prob_note (basic_block); +extern rtx_insn *get_last_bb_insn (basic_block); +extern void fixup_partitions (void); +extern bool purge_dead_edges (basic_block); +extern bool purge_all_dead_edges (void); +extern bool fixup_abnormal_edges (void); +extern void update_cfg_for_uncondjump (rtx_insn *); +extern rtx_insn *unlink_insn_chain (rtx_insn *, rtx_insn *); +extern void relink_block_chain (bool); +extern rtx_insn *duplicate_insn_chain (rtx_insn *, rtx_insn *, + class loop *, class copy_bb_data *); +extern void cfg_layout_initialize (int); +extern void cfg_layout_finalize (void); +extern void break_superblocks (void); +extern void init_rtl_bb_info (basic_block); +extern void find_bbs_reachable_by_hot_paths (hash_set *); + +#endif /* GCC_CFGRTL_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cgraph.h 
b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cgraph.h new file mode 100644 index 0000000..8c512b6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cgraph.h @@ -0,0 +1,3575 @@ +/* Callgraph handling code. + Copyright (C) 2003-2022 Free Software Foundation, Inc. + Contributed by Jan Hubicka + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CGRAPH_H +#define GCC_CGRAPH_H + +#include "profile-count.h" +#include "ipa-ref.h" +#include "plugin-api.h" +#include "ipa-param-manipulation.h" + +extern void debuginfo_early_init (void); +extern void debuginfo_init (void); +extern void debuginfo_fini (void); +extern void debuginfo_start (void); +extern void debuginfo_stop (void); +extern void debuginfo_early_start (void); +extern void debuginfo_early_stop (void); + +class ipa_opt_pass_d; +typedef ipa_opt_pass_d *ipa_opt_pass; + +/* Symbol table consists of functions and variables. + TODO: add labels and CONST_DECLs. */ +enum symtab_type +{ + SYMTAB_SYMBOL, + SYMTAB_FUNCTION, + SYMTAB_VARIABLE +}; + +/* Section names are stored as reference counted strings in GGC safe hashtable + (to make them survive through PCH). 
*/ + +struct GTY((for_user)) section_hash_entry +{ + int ref_count; + char *name; /* As long as this datastructure stays in GGC, we cannot put + string at the tail of structure of GGC dies in horrible + way */ +}; + +struct section_name_hasher : ggc_ptr_hash +{ + typedef const char *compare_type; + + static hashval_t hash (section_hash_entry *); + static bool equal (section_hash_entry *, const char *); +}; + +enum availability +{ + /* Not yet set by cgraph_function_body_availability. */ + AVAIL_UNSET, + /* Function body/variable initializer is unknown. */ + AVAIL_NOT_AVAILABLE, + /* Function body/variable initializer is known but might be replaced + by a different one from other compilation unit and thus needs to + be dealt with a care. Like AVAIL_NOT_AVAILABLE it can have + arbitrary side effects on escaping variables and functions, while + like AVAILABLE it might access static variables. */ + AVAIL_INTERPOSABLE, + /* Function body/variable initializer is known and will be used in final + program. */ + AVAIL_AVAILABLE, + /* Function body/variable initializer is known and all it's uses are + explicitly visible within current unit (i.e. it's address is never taken + and it is not exported to other units). Currently used only for + functions. */ + AVAIL_LOCAL +}; + +/* Classification of symbols WRT partitioning. */ +enum symbol_partitioning_class +{ + /* External declarations are ignored by partitioning algorithms and they are + added into the boundary later via compute_ltrans_boundary. */ + SYMBOL_EXTERNAL, + /* Partitioned symbols are put into one of partitions. */ + SYMBOL_PARTITION, + /* Duplicated symbols (such as comdat or constant pool references) are + copied into every node needing them via add_symbol_to_partition. */ + SYMBOL_DUPLICATE +}; + +/* Base of all entries in the symbol table. + The symtab_node is inherited by cgraph and varpol nodes. 
*/ +struct GTY((desc ("%h.type"), tag ("SYMTAB_SYMBOL"), + chain_next ("%h.next"), chain_prev ("%h.previous"))) + symtab_node +{ +public: + friend class symbol_table; + + /* Constructor. */ + explicit symtab_node (symtab_type t) + : type (t), resolution (LDPR_UNKNOWN), definition (false), alias (false), + transparent_alias (false), weakref (false), cpp_implicit_alias (false), + symver (false), analyzed (false), writeonly (false), + refuse_visibility_changes (false), externally_visible (false), + no_reorder (false), force_output (false), forced_by_abi (false), + unique_name (false), implicit_section (false), body_removed (false), + semantic_interposition (flag_semantic_interposition), + used_from_other_partition (false), in_other_partition (false), + address_taken (false), in_init_priority_hash (false), + need_lto_streaming (false), offloadable (false), ifunc_resolver (false), + order (false), next_sharing_asm_name (NULL), + previous_sharing_asm_name (NULL), same_comdat_group (NULL), ref_list (), + alias_target (NULL), lto_file_data (NULL), aux (NULL), + x_comdat_group (NULL_TREE), x_section (NULL) + {} + + /* Return name. */ + const char *name () const; + + /* Return dump name. */ + const char *dump_name () const; + + /* Return asm name. */ + const char *asm_name () const; + + /* Return dump name with assembler name. */ + const char *dump_asm_name () const; + + /* Return visibility name. */ + const char *get_visibility_string () const; + + /* Return type_name name. */ + const char *get_symtab_type_string () const; + + /* Add node into symbol table. This function is not used directly, but via + cgraph/varpool node creation routines. */ + void register_symbol (void); + + /* Remove symbol from symbol table. */ + void remove (void); + + /* Dump symtab node to F. */ + void dump (FILE *f); + + /* Dump symtab callgraph in graphviz format. */ + void dump_graphviz (FILE *f); + + /* Dump symtab node to stderr. 
*/ + void DEBUG_FUNCTION debug (void); + + /* Verify consistency of node. */ + void DEBUG_FUNCTION verify (void); + + /* Return ipa reference from this symtab_node to + REFERRED_NODE or REFERRED_VARPOOL_NODE. USE_TYPE specify type + of the use and STMT the statement (if it exists). */ + ipa_ref *create_reference (symtab_node *referred_node, + enum ipa_ref_use use_type); + + /* Return ipa reference from this symtab_node to + REFERRED_NODE or REFERRED_VARPOOL_NODE. USE_TYPE specify type + of the use and STMT the statement (if it exists). */ + ipa_ref *create_reference (symtab_node *referred_node, + enum ipa_ref_use use_type, gimple *stmt); + + /* If VAL is a reference to a function or a variable, add a reference from + this symtab_node to the corresponding symbol table node. Return the new + reference or NULL if none was created. */ + ipa_ref *maybe_create_reference (tree val, gimple *stmt); + + /* Clone all references from symtab NODE to this symtab_node. */ + void clone_references (symtab_node *node); + + /* Remove all stmt references in non-speculative references. + Those are not maintained during inlining & clonning. + The exception are speculative references that are updated along + with callgraph edges associated with them. */ + void clone_referring (symtab_node *node); + + /* Clone reference REF to this symtab_node and set its stmt to STMT. */ + ipa_ref *clone_reference (ipa_ref *ref, gimple *stmt); + + /* Find the structure describing a reference to REFERRED_NODE + and associated with statement STMT. */ + ipa_ref *find_reference (symtab_node *referred_node, gimple *stmt, + unsigned int lto_stmt_uid); + + /* Remove all references that are associated with statement STMT. */ + void remove_stmt_references (gimple *stmt); + + /* Remove all stmt references in non-speculative references. + Those are not maintained during inlining & clonning. + The exception are speculative references that are updated along + with callgraph edges associated with them. 
*/ + void clear_stmts_in_references (void); + + /* Remove all references in ref list. */ + void remove_all_references (void); + + /* Remove all referring items in ref list. */ + void remove_all_referring (void); + + /* Dump references in ref list to FILE. */ + void dump_references (FILE *file); + + /* Dump referring in list to FILE. */ + void dump_referring (FILE *); + + /* Get number of references for this node. */ + inline unsigned num_references (void) + { + return ref_list.references.length (); + } + + /* Iterates I-th reference in the list, REF is also set. */ + ipa_ref *iterate_reference (unsigned i, ipa_ref *&ref); + + /* Iterates I-th referring item in the list, REF is also set. */ + ipa_ref *iterate_referring (unsigned i, ipa_ref *&ref); + + /* Iterates I-th referring alias item in the list, REF is also set. */ + ipa_ref *iterate_direct_aliases (unsigned i, ipa_ref *&ref); + + /* Return true if symtab node and TARGET represents + semantically equivalent symbols. */ + bool semantically_equivalent_p (symtab_node *target); + + /* Classify symbol symtab node for partitioning. */ + enum symbol_partitioning_class get_partitioning_class (void); + + /* Return comdat group. */ + tree get_comdat_group () + { + return x_comdat_group; + } + + /* Return comdat group as identifier_node. */ + tree get_comdat_group_id () + { + if (x_comdat_group && TREE_CODE (x_comdat_group) != IDENTIFIER_NODE) + x_comdat_group = DECL_ASSEMBLER_NAME (x_comdat_group); + return x_comdat_group; + } + + /* Set comdat group. */ + void set_comdat_group (tree group) + { + gcc_checking_assert (!group || TREE_CODE (group) == IDENTIFIER_NODE + || DECL_P (group)); + x_comdat_group = group; + } + + /* Return section as string. */ + const char * get_section () const + { + if (!x_section) + return NULL; + return x_section->name; + } + + /* Remove node from same comdat group. */ + void remove_from_same_comdat_group (void); + + /* Add this symtab_node to the same comdat group that OLD is in. 
*/ + void add_to_same_comdat_group (symtab_node *old_node); + + /* Dissolve the same_comdat_group list in which NODE resides. */ + void dissolve_same_comdat_group_list (void); + + /* Return true when symtab_node is known to be used from other (non-LTO) + object file. Known only when doing LTO via linker plugin. */ + bool used_from_object_file_p (void); + + /* Walk the alias chain to return the symbol NODE is alias of. + If NODE is not an alias, return NODE. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + symtab_node *ultimate_alias_target (enum availability *avail = NULL, + struct symtab_node *ref = NULL); + + /* Return next reachable static symbol with initializer after NODE. */ + inline symtab_node *next_defined_symbol (void); + + /* Add reference recording that symtab node is alias of TARGET. + If TRANSPARENT is true make the alias to be transparent alias. + The function can fail in the case of aliasing cycles; in this case + it returns false. */ + bool resolve_alias (symtab_node *target, bool transparent = false); + + /* C++ FE sometimes change linkage flags after producing same + body aliases. */ + void fixup_same_cpp_alias_visibility (symtab_node *target); + + /* Call callback on symtab node and aliases associated to this node. + When INCLUDE_OVERWRITABLE is false, overwritable aliases and thunks are + skipped. */ + bool call_for_symbol_and_aliases (bool (*callback) (symtab_node *, void *), + void *data, + bool include_overwrite); + + /* If node cannot be interposable by static or dynamic linker to point to + different definition, return this symbol. Otherwise look for alias with + such property and if none exists, introduce new one. */ + symtab_node *noninterposable_alias (void); + + /* Return node that alias is aliasing. */ + inline symtab_node *get_alias_target (void); + + /* Return DECL that alias is aliasing. 
*/ + inline tree get_alias_target_tree (); + + /* Set section for symbol and its aliases. */ + void set_section (const char *section); + + /* Like set_section, but copying the section name from another node. */ + void set_section (const symtab_node &other); + + /* Set section, do not recurse into aliases. + When one wants to change section of symbol and its aliases, + use set_section. */ + void set_section_for_node (const char *section); + + /* Like set_section_for_node, but copying the section name from another + node. */ + void set_section_for_node (const symtab_node &other); + + /* Set initialization priority to PRIORITY. */ + void set_init_priority (priority_type priority); + + /* Return the initialization priority. */ + priority_type get_init_priority (); + + /* Return availability of NODE when referenced from REF. */ + enum availability get_availability (symtab_node *ref = NULL); + + /* During LTO stream-in this predicate can be used to check whether node + in question prevails in the linking to save some memory usage. */ + bool prevailing_p (void); + + /* Return true if NODE binds to current definition in final executable + when referenced from REF. If REF is NULL return conservative value + for any reference. */ + bool binds_to_current_def_p (symtab_node *ref = NULL); + + /* Make DECL local. */ + void make_decl_local (void); + + /* Copy visibility from N. */ + void copy_visibility_from (symtab_node *n); + + /* Return desired alignment of the definition. This is NOT alignment useful + to access THIS, because THIS may be interposable and DECL_ALIGN should + be used instead. It however must be guaranteed when output definition + of THIS. */ + unsigned int definition_alignment (); + + /* Return true if alignment can be increased. */ + bool can_increase_alignment_p (); + + /* Increase alignment of symbol to ALIGN. */ + void increase_alignment (unsigned int align); + + /* Return true if list contains an alias. 
*/ + bool has_aliases_p (void); + + /* Return true when the symbol is real symbol, i.e. it is not inline clone + or abstract function kept for debug info purposes only. */ + bool real_symbol_p (void); + + /* Return true when the symbol needs to be output to the LTO symbol table. */ + bool output_to_lto_symbol_table_p (void); + + /* Determine if symbol declaration is needed. That is, visible to something + either outside this translation unit, something magic in the system + configury. This function is used just during symbol creation. */ + bool needed_p (void); + + /* Return true if this symbol is a function from the C frontend specified + directly in RTL form (with "__RTL"). */ + bool native_rtl_p () const; + + /* Return true when there are references to the node. */ + bool referred_to_p (bool include_self = true); + + /* Return true if symbol can be discarded by linker from the binary. + Assume that symbol is used (so there is no need to take into account + garbage collecting linkers) + + This can happen for comdats, commons and weaks when they are prevailed + by other definition at static linking time. */ + inline bool + can_be_discarded_p (void) + { + return ((DECL_EXTERNAL (decl) + && !in_other_partition) + || ((get_comdat_group () + || DECL_COMMON (decl) + || (DECL_SECTION_NAME (decl) && DECL_WEAK (decl))) + && ((resolution != LDPR_PREVAILING_DEF + && resolution != LDPR_PREVAILING_DEF_IRONLY_EXP) + || flag_incremental_link) + && resolution != LDPR_PREVAILING_DEF_IRONLY)); + } + + /* Return true if NODE is local to a particular COMDAT group, and must not + be named from outside the COMDAT. This is used for C++ decloned + constructors. */ + inline bool comdat_local_p (void) + { + return (same_comdat_group && !TREE_PUBLIC (decl)); + } + + /* Return true if ONE and TWO are part of the same COMDAT group. */ + inline bool in_same_comdat_group_p (symtab_node *target); + + /* Return true if symbol is known to be nonzero. 
*/ + bool nonzero_address (); + + /* Return 0 if symbol is known to have different address than S2, + Return 1 if symbol is known to have same address as S2, + return 2 otherwise. + + If MEMORY_ACCESSED is true, assume that both memory pointer to THIS + and S2 is going to be accessed. This eliminates the situations when + either THIS or S2 is NULL and is useful for comparing bases when deciding + about memory aliasing. */ + int equal_address_to (symtab_node *s2, bool memory_accessed = false); + + /* Return true if symbol's address may possibly be compared to other + symbol's address. */ + bool address_matters_p (); + + /* Return true if NODE's address can be compared. This use properties + of NODE only and does not look if the address is actually taken in + interesting way. For that use ADDRESS_MATTERS_P instead. */ + bool address_can_be_compared_p (void); + + /* Return symbol table node associated with DECL, if any, + and NULL otherwise. */ + static inline symtab_node *get (const_tree decl) + { + /* Check that we are called for sane type of object - functions + and static or external variables. */ + gcc_checking_assert (TREE_CODE (decl) == FUNCTION_DECL + || (TREE_CODE (decl) == VAR_DECL + && (TREE_STATIC (decl) || DECL_EXTERNAL (decl) + || in_lto_p))); + /* Check that the mapping is sane - perhaps this check can go away, + but at the moment frontends tends to corrupt the mapping by calling + memcpy/memset on the tree nodes. */ + gcc_checking_assert (!decl->decl_with_vis.symtab_node + || decl->decl_with_vis.symtab_node->decl == decl); + return decl->decl_with_vis.symtab_node; + } + + /* Try to find a symtab node for declaration DECL and if it does not + exist or if it corresponds to an inline clone, create a new one. */ + static inline symtab_node * get_create (tree node); + + /* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME. + Return NULL if there's no such node. 
*/ + static symtab_node *get_for_asmname (const_tree asmname); + + /* Verify symbol table for internal consistency. */ + static DEBUG_FUNCTION void verify_symtab_nodes (void); + + /* Perform internal consistency checks, if they are enabled. */ + static inline void checking_verify_symtab_nodes (void); + + /* Type of the symbol. */ + ENUM_BITFIELD (symtab_type) type : 8; + + /* The symbols resolution. */ + ENUM_BITFIELD (ld_plugin_symbol_resolution) resolution : 8; + + /*** Flags representing the symbol type. ***/ + + /* True when symbol corresponds to a definition in current unit. + set via finalize_function or finalize_decl */ + unsigned definition : 1; + /* True when symbol is an alias. + Set by assemble_alias. */ + unsigned alias : 1; + /* When true the alias is translated into its target symbol either by GCC + or assembler (it also may just be a duplicate declaration of the same + linker name). + + Currently transparent aliases come in three different flavors + - aliases having the same assembler name as their target (aka duplicated + declarations). In this case the assembler names compare via + assembler_names_equal_p and weakref is false + - aliases that are renamed at a time being output to final file + by varasm.cc. For those DECL_ASSEMBLER_NAME have + IDENTIFIER_TRANSPARENT_ALIAS set and thus also their assembler + name must be unique. + Weakrefs belong to this category when we target assembler without + .weakref directive. + - weakrefs that are renamed by assembler via .weakref directive. + In this case the alias may or may not be definition (depending if + target declaration was seen by the compiler), weakref is set. + Unless we are before renaming statics, assembler names are different. + + Given that we now support duplicate declarations, the second option is + redundant and will be removed. */ + unsigned transparent_alias : 1; + /* True when alias is a weakref. 
*/ + unsigned weakref : 1; + /* C++ frontend produce same body aliases and extra name aliases for + virtual functions and vtables that are obviously equivalent. + Those aliases are bit special, especially because C++ frontend + visibility code is so ugly it cannot get them right at first time + and their visibility needs to be copied from their "masters" at + the end of parsing. */ + unsigned cpp_implicit_alias : 1; + /* The alias is a symbol version. */ + unsigned symver : 1; + /* Set once the definition was analyzed. The list of references and + other properties are built during analysis. */ + unsigned analyzed : 1; + /* Set for write-only variables. */ + unsigned writeonly : 1; + /* Visibility of symbol was used for further optimization; do not + permit further changes. */ + unsigned refuse_visibility_changes : 1; + + /*** Visibility and linkage flags. ***/ + + /* Set when function is visible by other units. */ + unsigned externally_visible : 1; + /* Don't reorder to other symbols having this set. */ + unsigned no_reorder : 1; + /* The symbol will be assumed to be used in an invisible way (like + by an toplevel asm statement). */ + unsigned force_output : 1; + /* Like FORCE_OUTPUT, but in the case it is ABI requiring the symbol to be + exported. Unlike FORCE_OUTPUT this flag gets cleared to symbols promoted + to static and it does not inhibit optimization. */ + unsigned forced_by_abi : 1; + /* True when the name is known to be unique and thus it does not need mangling. */ + unsigned unique_name : 1; + /* Specify whether the section was set by user or by + compiler via -ffunction-sections. */ + unsigned implicit_section : 1; + /* True when body and other characteristics have been removed by + symtab_remove_unreachable_nodes. */ + unsigned body_removed : 1; + /* True when symbol should comply to -fsemantic-interposition flag. */ + unsigned semantic_interposition : 1; + + /*** WHOPR Partitioning flags. 
+ These flags are used at ltrans stage when only part of the callgraph is + available. ***/ + + /* Set when variable is used from other LTRANS partition. */ + unsigned used_from_other_partition : 1; + /* Set when function is available in the other LTRANS partition. + During WPA output it is used to mark nodes that are present in + multiple partitions. */ + unsigned in_other_partition : 1; + + + + /*** other flags. ***/ + + /* Set when symbol has address taken. */ + unsigned address_taken : 1; + /* Set when init priority is set. */ + unsigned in_init_priority_hash : 1; + + /* Set when symbol needs to be streamed into LTO bytecode for LTO, or in case + of offloading, for separate compilation for a different target. */ + unsigned need_lto_streaming : 1; + + /* Set when symbol can be streamed into bytecode for offloading. */ + unsigned offloadable : 1; + + /* Set when symbol is an IFUNC resolver. */ + unsigned ifunc_resolver : 1; + + + /* Ordering of all symtab entries. */ + int order; + + /* Declaration representing the symbol. */ + tree decl; + + /* Linked list of symbol table entries starting with symtab_nodes. */ + symtab_node *next; + symtab_node *previous; + + /* Linked list of symbols with the same asm name. There may be multiple + entries for single symbol name during LTO, because symbols are renamed + only after partitioning. + + Because inline clones are kept in the assembler name has, they also produce + duplicate entries. + + There are also several long standing bugs where frontends and builtin + code produce duplicated decls. */ + symtab_node *next_sharing_asm_name; + symtab_node *previous_sharing_asm_name; + + /* Circular list of nodes in the same comdat group if non-NULL. */ + symtab_node *same_comdat_group; + + /* Vectors of referring and referenced entities. */ + ipa_ref_list GTY((skip)) ref_list; + + /* Alias target. May be either DECL pointer or ASSEMBLER_NAME pointer + depending to what was known to frontend on the creation time. 
+ Once alias is resolved, this pointer become NULL. */ + tree alias_target; + + /* File stream where this node is being written to. */ + struct lto_file_decl_data * lto_file_data; + + PTR GTY ((skip)) aux; + + /* Comdat group the symbol is in. Can be private if GGC allowed that. */ + tree x_comdat_group; + + /* Section name. Again can be private, if allowed. */ + section_hash_entry *x_section; + +protected: + /* Dump base fields of symtab nodes to F. Not to be used directly. */ + void dump_base (FILE *); + + /* Verify common part of symtab node. */ + bool DEBUG_FUNCTION verify_base (void); + + /* Remove node from symbol table. This function is not used directly, but via + cgraph/varpool node removal routines. */ + void unregister (struct clone_info *); + + /* Return the initialization and finalization priority information for + DECL. If there is no previous priority information, a freshly + allocated structure is returned. */ + struct symbol_priority_map *priority_info (void); + + /* Worker for call_for_symbol_and_aliases_1. */ + bool call_for_symbol_and_aliases_1 (bool (*callback) (symtab_node *, void *), + void *data, + bool include_overwrite); +private: + /* Workers for set_section. */ + static bool set_section_from_string (symtab_node *n, void *s); + static bool set_section_from_node (symtab_node *n, void *o); + + /* Worker for symtab_resolve_alias. */ + static bool set_implicit_section (symtab_node *n, void *); + + /* Worker searching noninterposable alias. */ + static bool noninterposable_alias (symtab_node *node, void *data); + + /* Worker for ultimate_alias_target. */ + symtab_node *ultimate_alias_target_1 (enum availability *avail = NULL, + symtab_node *ref = NULL); + + /* Get dump name with normal or assembly name. */ + const char *get_dump_name (bool asm_name_p) const; +}; + +inline void +symtab_node::checking_verify_symtab_nodes (void) +{ + if (flag_checking) + symtab_node::verify_symtab_nodes (); +} + +/* Walk all aliases for NODE. 
*/ +#define FOR_EACH_ALIAS(NODE, ALIAS) \ + for (unsigned ALIAS##_iter_ = 0; \ + (NODE)->iterate_direct_aliases (ALIAS##_iter_, ALIAS); \ + ALIAS##_iter_++) + +/* This is the information that is put into the cgraph local structure + to recover a function. */ +struct lto_file_decl_data; + +extern const char * const cgraph_availability_names[]; +extern const char * const ld_plugin_symbol_resolution_names[]; +extern const char * const tls_model_names[]; + +/* Represent which DECL tree (or reference to such tree) + will be replaced by another tree while versioning. */ +struct GTY(()) ipa_replace_map +{ + /* The new (replacing) tree. */ + tree new_tree; + /* Parameter number to replace, when old_tree is NULL. */ + int parm_num; + /* Set if the newly added reference should not be an address one, but a load + one from the operand of the ADDR_EXPR in NEW_TREE. This is for cases when + the corresponding parameter p is used only as *p. */ + unsigned force_load_ref : 1; +}; + +enum cgraph_simd_clone_arg_type +{ + SIMD_CLONE_ARG_TYPE_VECTOR, + SIMD_CLONE_ARG_TYPE_UNIFORM, + /* These are only for integer/pointer arguments passed by value. */ + SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP, + SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP, + /* These 6 are only for reference type arguments or arguments passed + by reference. */ + SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP, + SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP, + SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP, + SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP, + SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP, + SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP, + SIMD_CLONE_ARG_TYPE_MASK +}; + +/* Function arguments in the original function of a SIMD clone. + Supplementary data for `struct simd_clone'. */ + +struct GTY(()) cgraph_simd_clone_arg { + /* Original function argument as it originally existed in + DECL_ARGUMENTS. */ + tree orig_arg; + + /* orig_arg's function (or for extern functions type from + TYPE_ARG_TYPES). 
*/ + tree orig_type; + + /* If argument is a vector, this holds the vector version of + orig_arg that after adjusting the argument types will live in + DECL_ARGUMENTS. Otherwise, this is NULL. + + This basically holds: + vector(simdlen) __typeof__(orig_arg) new_arg. */ + tree vector_arg; + + /* vector_arg's type (or for extern functions new vector type. */ + tree vector_type; + + /* If argument is a vector, this holds the array where the simd + argument is held while executing the simd clone function. This + is a local variable in the cloned function. Its content is + copied from vector_arg upon entry to the clone. + + This basically holds: + __typeof__(orig_arg) simd_array[simdlen]. */ + tree simd_array; + + /* A SIMD clone's argument can be either linear (constant or + variable), uniform, or vector. */ + enum cgraph_simd_clone_arg_type arg_type; + + /* Variable alignment if available, otherwise 0. */ + unsigned int alignment; + + /* For arg_type SIMD_CLONE_ARG_TYPE_LINEAR_*CONSTANT_STEP this is + the constant linear step, if arg_type is + SIMD_CLONE_ARG_TYPE_LINEAR_*VARIABLE_STEP, this is index of + the uniform argument holding the step, otherwise 0. */ + HOST_WIDE_INT linear_step; +}; + +/* Specific data for a SIMD function clone. */ + +struct GTY(()) cgraph_simd_clone { + /* Number of words in the SIMD lane associated with this clone. */ + poly_uint64 simdlen; + + /* Number of annotated function arguments in `args'. This is + usually the number of named arguments in FNDECL. */ + unsigned int nargs; + + /* Max hardware vector size in bits for integral vectors. */ + poly_uint64 vecsize_int; + + /* Max hardware vector size in bits for floating point vectors. */ + poly_uint64 vecsize_float; + + /* Machine mode of the mask argument(s), if they are to be passed + as bitmasks in integer argument(s). VOIDmode if masks are passed + as vectors of characteristic type. */ + machine_mode mask_mode; + + /* The mangling character for a given vector size. 
This is used + to determine the ISA mangling bit as specified in the Intel + Vector ABI. */ + unsigned char vecsize_mangle; + + /* True if this is the masked, in-branch version of the clone, + otherwise false. */ + unsigned int inbranch : 1; + + /* Doubly linked list of SIMD clones. */ + cgraph_node *prev_clone, *next_clone; + + /* Original cgraph node the SIMD clones were created for. */ + cgraph_node *origin; + + /* Annotated function arguments for the original function. */ + cgraph_simd_clone_arg GTY((length ("%h.nargs"))) args[1]; +}; + +/* Function Multiversioning info. */ +struct GTY((for_user)) cgraph_function_version_info { + /* The cgraph_node for which the function version info is stored. */ + cgraph_node *this_node; + /* Chains all the semantically identical function versions. The + first function in this chain is the version_info node of the + default function. */ + cgraph_function_version_info *prev; + /* If this version node corresponds to a dispatcher for function + versions, this points to the version info node of the default + function, the first node in the chain. */ + cgraph_function_version_info *next; + /* If this node corresponds to a function version, this points + to the dispatcher function decl, which is the function that must + be called to execute the right function version at run-time. + + If this cgraph node is a dispatcher (if dispatcher_function is + true, in the cgraph_node struct) for function versions, this + points to resolver function, which holds the function body of the + dispatcher. The dispatcher decl is an alias to the resolver + function decl. */ + tree dispatcher_resolver; +}; + +#define DEFCIFCODE(code, type, string) CIF_ ## code, +/* Reasons for inlining failures. 
*/ + +enum cgraph_inline_failed_t { +#include "cif-code.def" + CIF_N_REASONS +}; + +enum cgraph_inline_failed_type_t +{ + CIF_FINAL_NORMAL = 0, + CIF_FINAL_ERROR +}; + +struct cgraph_edge; + +struct cgraph_edge_hasher : ggc_ptr_hash +{ + typedef gimple *compare_type; + + static hashval_t hash (cgraph_edge *); + static hashval_t hash (gimple *); + static bool equal (cgraph_edge *, gimple *); +}; + +/* The cgraph data structure. + Each function decl has assigned cgraph_node listing callees and callers. */ + +struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node +{ + friend class symbol_table; + + /* Constructor. */ + explicit cgraph_node (int uid) + : symtab_node (SYMTAB_FUNCTION), callees (NULL), callers (NULL), + indirect_calls (NULL), + next_sibling_clone (NULL), prev_sibling_clone (NULL), clones (NULL), + clone_of (NULL), call_site_hash (NULL), former_clone_of (NULL), + simdclone (NULL), simd_clones (NULL), ipa_transforms_to_apply (vNULL), + inlined_to (NULL), rtl (NULL), + count (profile_count::uninitialized ()), + count_materialization_scale (REG_BR_PROB_BASE), profile_id (0), + unit_id (0), tp_first_run (0), thunk (false), + used_as_abstract_origin (false), + lowered (false), process (false), frequency (NODE_FREQUENCY_NORMAL), + only_called_at_startup (false), only_called_at_exit (false), + tm_clone (false), dispatcher_function (false), calls_comdat_local (false), + icf_merged (false), nonfreeing_fn (false), merged_comdat (false), + merged_extern_inline (false), parallelized_function (false), + split_part (false), indirect_call_target (false), local (false), + versionable (false), can_change_signature (false), + redefined_extern_inline (false), tm_may_enter_irr (false), + ipcp_clone (false), declare_variant_alt (false), + calls_declare_variant_alt (false), m_uid (uid), m_summary_id (-1) + {} + + /* Remove the node from cgraph and all inline clones inlined into it. 
+ Skip however removal of FORBIDDEN_NODE and return true if it needs to be + removed. This allows to call the function from outer loop walking clone + tree. */ + bool remove_symbol_and_inline_clones (cgraph_node *forbidden_node = NULL); + + /* Record all references from cgraph_node that are taken + in statement STMT. */ + void record_stmt_references (gimple *stmt); + + /* Like cgraph_set_call_stmt but walk the clone tree and update all + clones sharing the same function body. + When WHOLE_SPECULATIVE_EDGES is true, all three components of + speculative edge gets updated. Otherwise we update only direct + call. */ + void set_call_stmt_including_clones (gimple *old_stmt, gcall *new_stmt, + bool update_speculative = true); + + /* Walk the alias chain to return the function cgraph_node is alias of. + Walk through thunk, too. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + cgraph_node *function_symbol (enum availability *avail = NULL, + struct symtab_node *ref = NULL); + + /* Walk the alias chain to return the function cgraph_node is alias of. + Walk through non virtual thunks, too. Thus we return either a function + or a virtual thunk node. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + cgraph_node *function_or_virtual_thunk_symbol + (enum availability *avail = NULL, + struct symtab_node *ref = NULL); + + /* Create node representing clone of N executed COUNT times. Decrease + the execution counts from original node too. + The new clone will have decl set to DECL that may or may not be the same + as decl of N. + + When UPDATE_ORIGINAL is true, the counts are subtracted from the original + function's profile to reflect the fact that part of execution is handled + by node. 
+ When CALL_DUPLICATION_HOOK is true, the ipa passes are acknowledged about + the new clone. Otherwise the caller is responsible for doing so later. + + If the new node is being inlined into another one, NEW_INLINED_TO should be + the outline function the new one is (even indirectly) inlined to. + All hooks will see this in node's inlined_to, when invoked. + Can be NULL if the node is not inlined. SUFFIX is string that is appended + to the original name. */ + cgraph_node *create_clone (tree decl, profile_count count, + bool update_original, + vec redirect_callers, + bool call_duplication_hook, + cgraph_node *new_inlined_to, + ipa_param_adjustments *param_adjustments, + const char *suffix = NULL); + + /* Create callgraph node clone with new declaration. The actual body will be + copied later at compilation stage. The name of the new clone will be + constructed from the name of the original node, SUFFIX and NUM_SUFFIX. */ + cgraph_node *create_virtual_clone (const vec &redirect_callers, + vec *tree_map, + ipa_param_adjustments *param_adjustments, + const char * suffix, unsigned num_suffix); + + /* Remove the node from the tree of virtual and inline clones and make it a + standalone node - not a clone any more. */ + void remove_from_clone_tree (); + + /* cgraph node being removed from symbol table; see if its entry can be + replaced by other inline clone. */ + cgraph_node *find_replacement (struct clone_info *); + + /* Create a new cgraph node which is the new version of + callgraph node. REDIRECT_CALLERS holds the callers + edges which should be redirected to point to + NEW_VERSION. ALL the callees edges of the node + are cloned to the new version node. Return the new + version node. + + If non-NULL BLOCK_TO_COPY determine what basic blocks + was copied to prevent duplications of calls that are dead + in the clone. + + SUFFIX is string that is appended to the original name. 
*/ + + cgraph_node *create_version_clone (tree new_decl, + vec redirect_callers, + bitmap bbs_to_copy, + const char *suffix = NULL); + + /* Perform function versioning. + Function versioning includes copying of the tree and + a callgraph update (creating a new cgraph node and updating + its callees and callers). + + REDIRECT_CALLERS varray includes the edges to be redirected + to the new version. + + TREE_MAP is a mapping of tree nodes we want to replace with + new ones (according to results of prior analysis). + + If non-NULL ARGS_TO_SKIP determine function parameters to remove + from new version. + If SKIP_RETURN is true, the new version will return void. + If non-NULL BLOCK_TO_COPY determine what basic blocks to copy. + If non_NULL NEW_ENTRY determine new entry BB of the clone. + + If TARGET_ATTRIBUTES is non-null, when creating a new declaration, + add the attributes to DECL_ATTRIBUTES. And call valid_attribute_p + that will promote value of the attribute DECL_FUNCTION_SPECIFIC_TARGET + of the declaration. + + If VERSION_DECL is set true, use clone_function_name_numbered for the + function clone. Otherwise, use clone_function_name. + + Return the new version's cgraph node. */ + cgraph_node *create_version_clone_with_body + (vec redirect_callers, + vec *tree_map, + ipa_param_adjustments *param_adjustments, + bitmap bbs_to_copy, basic_block new_entry_block, const char *clone_name, + tree target_attributes = NULL_TREE, bool version_decl = true); + + /* Insert a new cgraph_function_version_info node into cgraph_fnver_htab + corresponding to cgraph_node. */ + cgraph_function_version_info *insert_new_function_version (void); + + /* Get the cgraph_function_version_info node corresponding to node. */ + cgraph_function_version_info *function_version (void); + + /* Discover all functions and variables that are trivially needed, analyze + them as well as all functions and variables referred by them */ + void analyze (void); + + /* Add thunk alias into callgraph. 
The alias declaration is ALIAS and it + aliases DECL with an adjustments made into the first parameter. + See comments in struct symtab-thunks.h for detail on the parameters. */ + cgraph_node * create_thunk (tree alias, tree, bool this_adjusting, + HOST_WIDE_INT fixed_offset, + HOST_WIDE_INT virtual_value, + HOST_WIDE_INT indirect_offset, + tree virtual_offset, + tree real_alias); + + + /* Return node that alias is aliasing. */ + inline cgraph_node *get_alias_target (void); + + /* Given function symbol, walk the alias chain to return the function node + is alias of. Do not walk through thunks. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + + cgraph_node *ultimate_alias_target (availability *availability = NULL, + symtab_node *ref = NULL); + + /* Call expand_thunk on all callers that are thunks and analyze those + nodes that were expanded. */ + void expand_all_artificial_thunks (); + + /* Assemble thunks and aliases associated to node. */ + void assemble_thunks_and_aliases (void); + + /* Expand function specified by node. */ + void expand (void); + + /* As an GCC extension we allow redefinition of the function. The + semantics when both copies of bodies differ is not well defined. + We replace the old body with new body so in unit at a time mode + we always use new body, while in normal mode we may end up with + old body inlined into some functions and new body expanded and + inlined in others. */ + void reset (void); + + /* Creates a wrapper from cgraph_node to TARGET node. Thunk is used for this + kind of wrapper method. */ + void create_wrapper (cgraph_node *target); + + /* Verify cgraph nodes of the cgraph node. */ + void DEBUG_FUNCTION verify_node (void); + + /* Remove function from symbol table. */ + void remove (void); + + /* Dump call graph node to file F. */ + void dump (FILE *f); + + /* Dump call graph node to file F. 
*/ + void dump_graphviz (FILE *f); + + /* Dump call graph node to stderr. */ + void DEBUG_FUNCTION debug (void); + + /* When doing LTO, read cgraph_node's body from disk if it is not already + present. */ + bool get_untransformed_body (); + + /* Prepare function body. When doing LTO, read cgraph_node's body from disk + if it is not already present. When some IPA transformations are scheduled, + apply them. */ + bool get_body (); + + void materialize_clone (void); + + /* Release memory used to represent body of function. + Use this only for functions that are released before being translated to + target code (i.e. RTL). Functions that are compiled to RTL and beyond + are free'd in final.cc via free_after_compilation(). */ + void release_body (bool keep_arguments = false); + + /* Return the DECL_STRUCT_FUNCTION of the function. */ + struct function *get_fun () const; + + /* Bring cgraph node local. */ + void make_local (void); + + /* Likewise indicate that a node is having address taken. */ + void mark_address_taken (void); + + /* Set finalization priority to PRIORITY. */ + void set_fini_priority (priority_type priority); + + /* Return the finalization priority. */ + priority_type get_fini_priority (void); + + /* Create edge from a given function to CALLEE in the cgraph. */ + cgraph_edge *create_edge (cgraph_node *callee, + gcall *call_stmt, profile_count count, + bool cloning_p = false); + + /* Create an indirect edge with a yet-undetermined callee where the call + statement destination is a formal parameter of the caller with index + PARAM_INDEX. */ + cgraph_edge *create_indirect_edge (gcall *call_stmt, int ecf_flags, + profile_count count, + bool cloning_p = false); + + /* Like cgraph_create_edge walk the clone tree and update all clones sharing + same function body. If clones already have edge for OLD_STMT; only + update the edge same way as cgraph_set_call_stmt_including_clones does. 
*/ + void create_edge_including_clones (cgraph_node *callee, + gimple *old_stmt, gcall *stmt, + profile_count count, + cgraph_inline_failed_t reason); + + /* Return the callgraph edge representing the GIMPLE_CALL statement + CALL_STMT. */ + cgraph_edge *get_edge (gimple *call_stmt); + + /* Collect all callers of cgraph_node and its aliases that are known to lead + to NODE (i.e. are not overwritable) and that are not thunks. */ + auto_vec collect_callers (void); + + /* Remove all callers from the node. */ + void remove_callers (void); + + /* Remove all callees from the node. */ + void remove_callees (void); + + /* Return function availability. See cgraph.h for description of individual + return values. */ + enum availability get_availability (symtab_node *ref = NULL); + + /* Set TREE_NOTHROW on cgraph_node's decl and on aliases of the node + if any to NOTHROW. */ + bool set_nothrow_flag (bool nothrow); + + /* SET DECL_IS_MALLOC on cgraph_node's decl and on aliases of the node + if any. */ + bool set_malloc_flag (bool malloc_p); + + /* SET TREE_THIS_VOLATILE on cgraph_node's decl and on aliases of the node + if any. */ + bool set_noreturn_flag (bool noreturn_p); + + /* If SET_CONST is true, mark function, aliases and thunks to be ECF_CONST. + If SET_CONST if false, clear the flag. + + When setting the flag be careful about possible interposition and + do not set the flag for functions that can be interposed and set pure + flag for functions that can bind to other definition. + + Return true if any change was done. */ + + bool set_const_flag (bool set_const, bool looping); + + /* Set DECL_PURE_P on cgraph_node's decl and on aliases of the node + if any to PURE. + + When setting the flag, be careful about possible interposition. + Return true if any change was done. */ + + bool set_pure_flag (bool pure, bool looping); + + /* Call callback on function and aliases associated to the function. 
+ When INCLUDE_OVERWRITABLE is false, overwritable aliases and thunks are + skipped. */ + + bool call_for_symbol_and_aliases (bool (*callback) (cgraph_node *, + void *), + void *data, bool include_overwritable); + + /* Call callback on cgraph_node, thunks and aliases associated to NODE. + When INCLUDE_OVERWRITABLE is false, overwritable aliases and thunks are + skipped. When EXCLUDE_VIRTUAL_THUNKS is true, virtual thunks are + skipped. */ + bool call_for_symbol_thunks_and_aliases (bool (*callback) (cgraph_node *node, + void *data), + void *data, + bool include_overwritable, + bool exclude_virtual_thunks = false); + + /* Likewise indicate that a node is needed, i.e. reachable via some + external means. */ + inline void mark_force_output (void); + + /* Return true when function can be marked local. */ + bool local_p (void); + + /* Return true if cgraph_node can be made local for API change. + Extern inline functions and C++ COMDAT functions can be made local + at the expense of possible code size growth if function is used in multiple + compilation units. */ + bool can_be_local_p (void); + + /* Return true when cgraph_node cannot return or throw and thus + it is safe to ignore its side effects for IPA analysis. */ + bool cannot_return_p (void); + + /* Return true when function cgraph_node and all its aliases are only called + directly. + i.e. it is not externally visible, address was not taken and + it is not used in any other non-standard way. */ + bool only_called_directly_p (void); + + /* Return true when function is only called directly or it has alias. + i.e. it is not externally visible, address was not taken and + it is not used in any other non-standard way. */ + inline bool only_called_directly_or_aliased_p (void); + + /* Return true when function cgraph_node can be expected to be removed + from program when direct calls in this compilation unit are removed. 
+ + As a special case COMDAT functions are + cgraph_can_remove_if_no_direct_calls_p while the are not + cgraph_only_called_directly_p (it is possible they are called from other + unit) + + This function behaves as cgraph_only_called_directly_p because eliminating + all uses of COMDAT function does not make it necessarily disappear from + the program unless we are compiling whole program or we do LTO. In this + case we know we win since dynamic linking will not really discard the + linkonce section. + + If WILL_INLINE is true, assume that function will be inlined into all the + direct calls. */ + bool will_be_removed_from_program_if_no_direct_calls_p + (bool will_inline = false); + + /* Return true when function can be removed from callgraph + if all direct calls and references are eliminated. The function does + not take into account comdat groups. */ + bool can_remove_if_no_direct_calls_and_refs_p (void); + + /* Return true when function cgraph_node and its aliases can be removed from + callgraph if all direct calls are eliminated. + If WILL_INLINE is true, assume that function will be inlined into all the + direct calls. */ + bool can_remove_if_no_direct_calls_p (bool will_inline = false); + + /* Return true when callgraph node is a function with Gimple body defined + in current unit. Functions can also be define externally or they + can be thunks with no Gimple representation. + + Note that at WPA stage, the function body may not be present in memory. */ + inline bool has_gimple_body_p (void); + + /* Return true if this node represents a former, i.e. an expanded, thunk. */ + bool former_thunk_p (void); + + /* Check if function calls comdat local. This is used to recompute + calls_comdat_local flag after function transformations. */ + bool check_calls_comdat_local_p (); + + /* Return true if function should be optimized for size. */ + enum optimize_size_level optimize_for_size_p (void); + + /* Dump the callgraph to file F. 
*/ + static void dump_cgraph (FILE *f); + + /* Dump the call graph to stderr. */ + static inline + void debug_cgraph (void) + { + dump_cgraph (stderr); + } + + /* Get unique identifier of the node. */ + inline int get_uid () + { + return m_uid; + } + + /* Get summary id of the node. */ + inline int get_summary_id () + { + return m_summary_id; + } + + /* Record that DECL1 and DECL2 are semantically identical function + versions. */ + static void record_function_versions (tree decl1, tree decl2); + + /* Remove the cgraph_function_version_info and cgraph_node for DECL. This + DECL is a duplicate declaration. */ + static void delete_function_version_by_decl (tree decl); + + /* Add the function FNDECL to the call graph. + Unlike finalize_function, this function is intended to be used + by middle end and allows insertion of new function at arbitrary point + of compilation. The function can be either in high, low or SSA form + GIMPLE. + + The function is assumed to be reachable and have address taken (so no + API breaking optimizations are performed on it). + + Main work done by this function is to enqueue the function for later + processing to avoid need the passes to be re-entrant. */ + static void add_new_function (tree fndecl, bool lowered); + + /* Return callgraph node for given symbol and check it is a function. */ + static inline cgraph_node *get (const_tree decl) + { + gcc_checking_assert (TREE_CODE (decl) == FUNCTION_DECL); + return dyn_cast (symtab_node::get (decl)); + } + + /* DECL has been parsed. Take it, queue it, compile it at the whim of the + logic in effect. If NO_COLLECT is true, then our caller cannot stand to + have the garbage collector run at the moment. We would need to either + create a new GC context, or just not compile right now. */ + static void finalize_function (tree, bool); + + /* Return cgraph node assigned to DECL. Create new one when needed. 
*/ + static cgraph_node * create (tree decl); + + /* Try to find a call graph node for declaration DECL and if it does not + exist or if it corresponds to an inline clone, create a new one. */ + static cgraph_node * get_create (tree); + + /* Return local info for the compiled function. */ + static cgraph_node *local_info_node (tree decl); + + /* Return RTL info for the compiled function. */ + static struct cgraph_rtl_info *rtl_info (const_tree); + + /* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME. + Return NULL if there's no such node. */ + static cgraph_node *get_for_asmname (tree asmname); + + /* Attempt to mark ALIAS as an alias to DECL. Return alias node if + successful and NULL otherwise. + Same body aliases are output whenever the body of DECL is output, + and cgraph_node::get (ALIAS) transparently + returns cgraph_node::get (DECL). */ + static cgraph_node * create_same_body_alias (tree alias, tree decl); + + /* Verify whole cgraph structure. */ + static void DEBUG_FUNCTION verify_cgraph_nodes (void); + + /* Verify cgraph, if consistency checking is enabled. */ + static inline void checking_verify_cgraph_nodes (void); + + /* Worker to bring NODE local. */ + static bool make_local (cgraph_node *node, void *); + + /* Mark ALIAS as an alias to DECL. DECL_NODE is cgraph node representing + the function body is associated + with (not necessarily cgraph_node (DECL). */ + static cgraph_node *create_alias (tree alias, tree target); + + /* Return true if NODE has thunk. */ + static bool has_thunk_p (cgraph_node *node, void *); + + cgraph_edge *callees; + cgraph_edge *callers; + /* List of edges representing indirect calls with a yet undetermined + callee. */ + cgraph_edge *indirect_calls; + cgraph_node *next_sibling_clone; + cgraph_node *prev_sibling_clone; + cgraph_node *clones; + cgraph_node *clone_of; + /* For functions with many calls sites it holds map from call expression + to the edge to speed up cgraph_edge function. 
*/ + hash_table *GTY(()) call_site_hash; + /* Declaration node used to be clone of. */ + tree former_clone_of; + + /* If this is a SIMD clone, this points to the SIMD specific + information for it. */ + cgraph_simd_clone *simdclone; + /* If this function has SIMD clones, this points to the first clone. */ + cgraph_node *simd_clones; + + /* Interprocedural passes scheduled to have their transform functions + applied next time we execute local pass on them. We maintain it + per-function in order to allow IPA passes to introduce new functions. */ + vec GTY((skip)) ipa_transforms_to_apply; + + /* For inline clones this points to the function they will be + inlined into. */ + cgraph_node *inlined_to; + + struct cgraph_rtl_info *rtl; + + /* Expected number of executions: calculated in profile.cc. */ + profile_count count; + /* How to scale counts at materialization time; used to merge + LTO units with different number of profile runs. */ + int count_materialization_scale; + /* ID assigned by the profiling. */ + unsigned int profile_id; + /* ID of the translation unit. */ + int unit_id; + /* Time profiler: first run of function. */ + int tp_first_run; + + /* True when symbol is a thunk. */ + unsigned thunk : 1; + /* Set when decl is an abstract function pointed to by the + ABSTRACT_DECL_ORIGIN of a reachable function. */ + unsigned used_as_abstract_origin : 1; + /* Set once the function is lowered (i.e. its CFG is built). */ + unsigned lowered : 1; + /* Set once the function has been instantiated and its callee + lists created. */ + unsigned process : 1; + /* How commonly executed the node is. Initialized during branch + probabilities pass. */ + ENUM_BITFIELD (node_frequency) frequency : 2; + /* True when function can only be called at startup (from static ctor). */ + unsigned only_called_at_startup : 1; + /* True when function can only be called at startup (from static dtor). 
*/ + unsigned only_called_at_exit : 1; + /* True when function is the transactional clone of a function which + is called only from inside transactions. */ + /* ?? We should be able to remove this. We have enough bits in + cgraph to calculate it. */ + unsigned tm_clone : 1; + /* True if this decl is a dispatcher for function versions. */ + unsigned dispatcher_function : 1; + /* True if this decl calls a COMDAT-local function. This is set up in + compute_fn_summary and inline_call. */ + unsigned calls_comdat_local : 1; + /* True if node has been created by merge operation in IPA-ICF. */ + unsigned icf_merged: 1; + /* True if call to node can't result in a call to free, munmap or + other operation that could make previously non-trapping memory + accesses trapping. */ + unsigned nonfreeing_fn : 1; + /* True if there was multiple COMDAT bodies merged by lto-symtab. */ + unsigned merged_comdat : 1; + /* True if this def was merged with extern inlines. */ + unsigned merged_extern_inline : 1; + /* True if function was created to be executed in parallel. */ + unsigned parallelized_function : 1; + /* True if function is part split out by ipa-split. */ + unsigned split_part : 1; + /* True if the function appears as possible target of indirect call. */ + unsigned indirect_call_target : 1; + /* Set when function is visible in current compilation unit only and + its address is never taken. */ + unsigned local : 1; + /* False when there is something makes versioning impossible. */ + unsigned versionable : 1; + /* False when function calling convention and signature cannot be changed. + This is the case when __builtin_apply_args is used. */ + unsigned can_change_signature : 1; + /* True when the function has been originally extern inline, but it is + redefined now. */ + unsigned redefined_extern_inline : 1; + /* True if the function may enter serial irrevocable mode. */ + unsigned tm_may_enter_irr : 1; + /* True if this was a clone created by ipa-cp. 
*/ + unsigned ipcp_clone : 1; + /* True if this is the deferred declare variant resolution artificial + function. */ + unsigned declare_variant_alt : 1; + /* True if the function calls declare_variant_alt functions. */ + unsigned calls_declare_variant_alt : 1; + +private: + /* Unique id of the node. */ + int m_uid; + + /* Summary id that is recycled. */ + int m_summary_id; + + /* Worker for call_for_symbol_and_aliases. */ + bool call_for_symbol_and_aliases_1 (bool (*callback) (cgraph_node *, + void *), + void *data, bool include_overwritable); +}; + +/* A cgraph node set is a collection of cgraph nodes. A cgraph node + can appear in multiple sets. */ +struct cgraph_node_set_def +{ + hash_map *map; + vec nodes; +}; + +typedef cgraph_node_set_def *cgraph_node_set; +typedef struct varpool_node_set_def *varpool_node_set; + +struct varpool_node; + +/* A varpool node set is a collection of varpool nodes. A varpool node + can appear in multiple sets. */ +struct varpool_node_set_def +{ + hash_map * map; + vec nodes; +}; + +/* Iterator structure for cgraph node sets. */ +struct cgraph_node_set_iterator +{ + cgraph_node_set set; + unsigned index; +}; + +/* Iterator structure for varpool node sets. */ +struct varpool_node_set_iterator +{ + varpool_node_set set; + unsigned index; +}; + +/* Context of polymorphic call. It represent information about the type of + instance that may reach the call. This is used by ipa-devirt walkers of the + type inheritance graph. */ + +class GTY(()) ipa_polymorphic_call_context { +public: + /* The called object appears in an object of type OUTER_TYPE + at offset OFFSET. When information is not 100% reliable, we + use SPECULATIVE_OUTER_TYPE and SPECULATIVE_OFFSET. */ + HOST_WIDE_INT offset; + HOST_WIDE_INT speculative_offset; + tree outer_type; + tree speculative_outer_type; + /* True if outer object may be in construction or destruction. */ + unsigned maybe_in_construction : 1; + /* True if outer object may be of derived type. 
*/ + unsigned maybe_derived_type : 1; + /* True if speculative outer object may be of derived type. We always + speculate that construction does not happen. */ + unsigned speculative_maybe_derived_type : 1; + /* True if the context is invalid and all calls should be redirected + to BUILTIN_UNREACHABLE. */ + unsigned invalid : 1; + /* True if the outer type is dynamic. */ + unsigned dynamic : 1; + + /* Build empty "I know nothing" context. */ + ipa_polymorphic_call_context (); + /* Build polymorphic call context for indirect call E. */ + ipa_polymorphic_call_context (cgraph_edge *e); + /* Build polymorphic call context for IP invariant CST. + If specified, OTR_TYPE specify the type of polymorphic call + that takes CST+OFFSET as a parameter. */ + ipa_polymorphic_call_context (tree cst, tree otr_type = NULL, + HOST_WIDE_INT offset = 0); + /* Build context for pointer REF contained in FNDECL at statement STMT. + if INSTANCE is non-NULL, return pointer to the object described by + the context. */ + ipa_polymorphic_call_context (tree fndecl, tree ref, gimple *stmt, + tree *instance = NULL); + + /* Look for vtable stores or constructor calls to work out dynamic type + of memory location. */ + bool get_dynamic_type (tree, tree, tree, gimple *, unsigned *); + + /* Make context non-speculative. */ + void clear_speculation (); + + /* Produce context specifying all derived types of OTR_TYPE. If OTR_TYPE is + NULL, the context is set to dummy "I know nothing" setting. */ + void clear_outer_type (tree otr_type = NULL); + + /* Walk container types and modify context to point to actual class + containing OTR_TYPE (if non-NULL) as base class. + Return true if resulting context is valid. + + When CONSIDER_PLACEMENT_NEW is false, reject contexts that may be made + valid only via allocation of new polymorphic type inside by means + of placement new. + + When CONSIDER_BASES is false, only look for actual fields, not base types + of TYPE. 
*/ + bool restrict_to_inner_class (tree otr_type, + bool consider_placement_new = true, + bool consider_bases = true); + + /* Adjust all offsets in contexts by given number of bits. */ + void offset_by (HOST_WIDE_INT); + /* Use when we cannot track dynamic type change. This speculatively assume + type change is not happening. */ + void possible_dynamic_type_change (bool, tree otr_type = NULL); + /* Assume that both THIS and a given context is valid and strengthen THIS + if possible. Return true if any strengthening was made. + If actual type the context is being used in is known, OTR_TYPE should be + set accordingly. This improves quality of combined result. */ + bool combine_with (ipa_polymorphic_call_context, tree otr_type = NULL); + bool meet_with (ipa_polymorphic_call_context, tree otr_type = NULL); + + /* Return TRUE if context is fully useless. */ + bool useless_p () const; + /* Return TRUE if this context conveys the same information as X. */ + bool equal_to (const ipa_polymorphic_call_context &x) const; + + /* Dump human readable context to F. If NEWLINE is true, it will be + terminated by a newline. */ + void dump (FILE *f, bool newline = true) const; + void DEBUG_FUNCTION debug () const; + + /* LTO streaming. */ + void stream_out (struct output_block *) const; + void stream_in (class lto_input_block *, class data_in *data_in); + +private: + bool combine_speculation_with (tree, HOST_WIDE_INT, bool, tree); + bool meet_speculation_with (tree, HOST_WIDE_INT, bool, tree); + void set_by_decl (tree, HOST_WIDE_INT); + bool set_by_invariant (tree, tree, HOST_WIDE_INT); + bool speculation_consistent_p (tree, HOST_WIDE_INT, bool, tree) const; + void make_speculative (tree otr_type = NULL); +}; + +/* Structure containing additional information about an indirect call. */ + +class GTY(()) cgraph_indirect_call_info +{ +public: + /* When agg_content is set, an offset where the call pointer is located + within the aggregate. 
*/ + HOST_WIDE_INT offset; + /* Context of the polymorphic call; use only when POLYMORPHIC flag is set. */ + ipa_polymorphic_call_context context; + /* OBJ_TYPE_REF_TOKEN of a polymorphic call (if polymorphic is set). */ + HOST_WIDE_INT otr_token; + /* Type of the object from OBJ_TYPE_REF_OBJECT. */ + tree otr_type; + /* Index of the parameter that is called. */ + int param_index; + /* ECF flags determined from the caller. */ + int ecf_flags; + + /* Number of speculative call targets, it's less than GCOV_TOPN_VALUES. */ + unsigned num_speculative_call_targets : 16; + + /* Set when the call is a virtual call with the parameter being the + associated object pointer rather than a simple direct call. */ + unsigned polymorphic : 1; + /* Set when the call is a call of a pointer loaded from contents of an + aggregate at offset. */ + unsigned agg_contents : 1; + /* Set when this is a call through a member pointer. */ + unsigned member_ptr : 1; + /* When the agg_contents bit is set, this one determines whether the + destination is loaded from a parameter passed by reference. */ + unsigned by_ref : 1; + /* When the agg_contents bit is set, this one determines whether we can + deduce from the function body that the loaded value from the reference is + never modified between the invocation of the function and the load + point. */ + unsigned guaranteed_unmodified : 1; + /* For polymorphic calls this specify whether the virtual table pointer + may have changed in between function entry and the call. */ + unsigned vptr_changed : 1; +}; + +class GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"), + for_user)) cgraph_edge +{ +public: + friend struct cgraph_node; + friend class symbol_table; + + /* Remove EDGE from the cgraph. */ + static void remove (cgraph_edge *edge); + + /* Change field call_stmt of edge E to NEW_STMT. If UPDATE_SPECULATIVE and E + is any component of speculative edge, then update all components. 
+ Speculations can be resolved in the process and EDGE can be removed and + deallocated. Return the edge that now represents the call. */ + static cgraph_edge *set_call_stmt (cgraph_edge *e, gcall *new_stmt, + bool update_speculative = true); + + /* Redirect callee of the edge to N. The function does not update underlying + call expression. */ + void redirect_callee (cgraph_node *n); + + /* If the edge does not lead to a thunk, simply redirect it to N. Otherwise + create one or more equivalent thunks for N and redirect E to the first in + the chain. Note that it is then necessary to call + n->expand_all_artificial_thunks once all callers are redirected. */ + void redirect_callee_duplicating_thunks (cgraph_node *n); + + /* Make an indirect edge with an unknown callee an ordinary edge leading to + CALLEE. Speculations can be resolved in the process and EDGE can be + removed and deallocated. Return the edge that now represents the + call. */ + static cgraph_edge *make_direct (cgraph_edge *edge, cgraph_node *callee); + + /* Turn edge into speculative call calling N2. Update + the profile so the direct call is taken COUNT times + with FREQUENCY. speculative_id is used to link direct calls with their + corresponding IPA_REF_ADDR references when representing speculative calls. + */ + cgraph_edge *make_speculative (cgraph_node *n2, profile_count direct_count, + unsigned int speculative_id = 0); + + /* Speculative call consists of an indirect edge and one or more + direct edge+ref pairs. Speculative will expand to the following sequence: + + if (call_dest == target1) // reference to target1 + target1 (); // direct call to target1 + else if (call_dest == target2) // reference to targt2 + target2 (); // direct call to target2 + else + call_dest (); // indirect call + + Before the expansion we will have indirect call and the direct call+ref + pairs all linked to single statement. 
+ + Note that ref may point to different symbol than the corresponding call + becuase the speculated edge may have been optimized (redirected to + a clone) or inlined. + + Given an edge which is part of speculative call, return the first + direct call edge in the speculative call sequence. + + In the example above called on any cgraph edge in the sequence it will + return direct call to target1. */ + cgraph_edge *first_speculative_call_target (); + + /* Return next speculative call target or NULL if there is none. + All targets are required to form an interval in the callee list. + + In example above, if called on call to target1 it will return call to + target2. */ + cgraph_edge *next_speculative_call_target () + { + cgraph_edge *e = this; + gcc_checking_assert (speculative && callee); + + if (e->next_callee && e->next_callee->speculative + && e->next_callee->call_stmt == e->call_stmt + && e->next_callee->lto_stmt_uid == e->lto_stmt_uid) + return e->next_callee; + return NULL; + } + + /* When called on any edge in the speculative call return the (unique) + indirect call edge in the speculative call sequence. */ + cgraph_edge *speculative_call_indirect_edge () + { + gcc_checking_assert (speculative); + if (!callee) + return this; + for (cgraph_edge *e2 = caller->indirect_calls; + true; e2 = e2->next_callee) + if (e2->speculative + && call_stmt == e2->call_stmt + && lto_stmt_uid == e2->lto_stmt_uid) + return e2; + } + + /* When called on any edge in speculative call and when given any target + of ref which is speculated to it returns the corresponding direct call. + + In example above if called on function target2 it will return call to + target2. */ + cgraph_edge *speculative_call_for_target (cgraph_node *); + + /* Return REF corresponding to direct call in the specualtive call + sequence. 
*/ + ipa_ref *speculative_call_target_ref () + { + ipa_ref *ref; + + gcc_checking_assert (speculative); + for (unsigned int i = 0; caller->iterate_reference (i, ref); i++) + if (ref->speculative && ref->speculative_id == speculative_id + && ref->stmt == (gimple *)call_stmt + && ref->lto_stmt_uid == lto_stmt_uid) + return ref; + gcc_unreachable (); + } + + /* Speculative call edge turned out to be direct call to CALLEE_DECL. Remove + the speculative call sequence and return edge representing the call, the + original EDGE can be removed and deallocated. It is up to caller to + redirect the call as appropriate. Return the edge that now represents the + call. + + For "speculative" indirect call that contains multiple "speculative" + targets (i.e. edge->indirect_info->num_speculative_call_targets > 1), + decrease the count and only remove current direct edge. + + If no speculative direct call left to the speculative indirect call, remove + the speculative of both the indirect call and corresponding direct edge. + + It is up to caller to iteratively resolve each "speculative" direct call + and redirect the call as appropriate. */ + static cgraph_edge *resolve_speculation (cgraph_edge *edge, + tree callee_decl = NULL); + + /* If necessary, change the function declaration in the call statement + associated with edge E so that it corresponds to the edge callee. + Speculations can be resolved in the process and EDGE can be removed and + deallocated. + + The edge could be one of speculative direct call generated from speculative + indirect call. In this circumstance, decrease the speculative targets + count (i.e. num_speculative_call_targets) and redirect call stmt to the + corresponding i-th target. If no speculative direct call left to the + speculative indirect call, remove "speculative" of the indirect call and + also redirect stmt to it's final direct target. + + It is up to caller to iteratively transform each "speculative" + direct call as appropriate. 
*/ + static gimple *redirect_call_stmt_to_callee (cgraph_edge *e); + + /* Create clone of edge in the node N represented + by CALL_EXPR the callgraph. */ + cgraph_edge * clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid, + profile_count num, profile_count den, + bool update_original); + + /* Verify edge count and frequency. */ + bool verify_count (); + + /* Return true when call of edge cannot lead to return from caller + and thus it is safe to ignore its side effects for IPA analysis + when computing side effects of the caller. */ + bool cannot_lead_to_return_p (void); + + /* Return true when the edge represents a direct recursion. */ + bool recursive_p (void); + + /* Return true if the edge may be considered hot. */ + bool maybe_hot_p (void); + + /* Get unique identifier of the edge. */ + inline int get_uid () + { + return m_uid; + } + + /* Get summary id of the edge. */ + inline int get_summary_id () + { + return m_summary_id; + } + + /* Rebuild cgraph edges for current function node. This needs to be run after + passes that don't update the cgraph. */ + static unsigned int rebuild_edges (void); + + /* Rebuild cgraph references for current function node. This needs to be run + after passes that don't update the cgraph. */ + static void rebuild_references (void); + + /* During LTO stream in this can be used to check whether call can possibly + be internal to the current translation unit. */ + bool possibly_call_in_translation_unit_p (void); + + /* Return num_speculative_targets of this edge. */ + int num_speculative_call_targets_p (void); + + /* Expected number of executions: calculated in profile.cc. */ + profile_count count; + cgraph_node *caller; + cgraph_node *callee; + cgraph_edge *prev_caller; + cgraph_edge *next_caller; + cgraph_edge *prev_callee; + cgraph_edge *next_callee; + gcall *call_stmt; + /* Additional information about an indirect call. Not cleared when an edge + becomes direct. 
*/ + cgraph_indirect_call_info *indirect_info; + PTR GTY ((skip (""))) aux; + /* When equal to CIF_OK, inline this call. Otherwise, points to the + explanation why function was not inlined. */ + enum cgraph_inline_failed_t inline_failed; + /* The stmt_uid of call_stmt. This is used by LTO to recover the call_stmt + when the function is serialized in. */ + unsigned int lto_stmt_uid; + /* speculative id is used to link direct calls with their corresponding + IPA_REF_ADDR references when representing speculative calls. */ + unsigned int speculative_id : 16; + /* Whether this edge was made direct by indirect inlining. */ + unsigned int indirect_inlining_edge : 1; + /* Whether this edge describes an indirect call with an undetermined + callee. */ + unsigned int indirect_unknown_callee : 1; + /* Whether this edge is still a dangling */ + /* True if the corresponding CALL stmt cannot be inlined. */ + unsigned int call_stmt_cannot_inline_p : 1; + /* Can this call throw externally? */ + unsigned int can_throw_external : 1; + /* Edges with SPECULATIVE flag represents indirect calls that was + speculatively turned into direct (i.e. by profile feedback). + The final code sequence will have form: + + if (call_target == expected_fn) + expected_fn (); + else + call_target (); + + Every speculative call is represented by three components attached + to a same call statement: + 1) a direct call (to expected_fn) + 2) an indirect call (to call_target) + 3) a IPA_REF_ADDR reference to expected_fn. + + Optimizers may later redirect direct call to clone, so 1) and 3) + do not need to necessarily agree with destination. */ + unsigned int speculative : 1; + /* Set to true when caller is a constructor or destructor of polymorphic + type. */ + unsigned in_polymorphic_cdtor : 1; + + /* Return true if call must bind to current definition. */ + bool binds_to_current_def_p (); + + /* Expected frequency of executions within the function. 
+ When set to CGRAPH_FREQ_BASE, the edge is expected to be called once + per function call. The range is 0 to CGRAPH_FREQ_MAX. */ + int frequency (); + + /* Expected frequency of executions within the function. */ + sreal sreal_frequency (); +private: + /* Unique id of the edge. */ + int m_uid; + + /* Summary id that is recycled. */ + int m_summary_id; + + /* Remove the edge from the list of the callers of the callee. */ + void remove_caller (void); + + /* Remove the edge from the list of the callees of the caller. */ + void remove_callee (void); + + /* Set callee N of call graph edge and add it to the corresponding set of + callers. */ + void set_callee (cgraph_node *n); + + /* Output flags of edge to a file F. */ + void dump_edge_flags (FILE *f); + + /* Dump edge to stderr. */ + void DEBUG_FUNCTION debug (void); + + /* Verify that call graph edge corresponds to DECL from the associated + statement. Return true if the verification should fail. */ + bool verify_corresponds_to_fndecl (tree decl); +}; + +#define CGRAPH_FREQ_BASE 1000 +#define CGRAPH_FREQ_MAX 100000 + +/* The varpool data structure. + Each static variable decl has assigned varpool_node. */ + +struct GTY((tag ("SYMTAB_VARIABLE"))) varpool_node : public symtab_node +{ + /* Constructor. */ + explicit varpool_node () + : symtab_node (SYMTAB_VARIABLE), output (0), dynamically_initialized (0), + tls_model (TLS_MODEL_NONE), used_by_single_function (0) + {} + + /* Dump given varpool node to F. */ + void dump (FILE *f); + + /* Dump given varpool node to stderr. */ + void DEBUG_FUNCTION debug (void); + + /* Remove variable from symbol table. */ + void remove (void); + + /* Remove node initializer when it is no longer needed. */ + void remove_initializer (void); + + void analyze (void); + + /* Return variable availability. */ + availability get_availability (symtab_node *ref = NULL); + + /* When doing LTO, read variable's constructor from disk if + it is not already present. 
*/ + tree get_constructor (void); + + /* Return true if variable has constructor that can be used for folding. */ + bool ctor_useable_for_folding_p (void); + + /* For given variable pool node, walk the alias chain to return the function + the variable is alias of. Do not walk through thunks. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + inline varpool_node *ultimate_alias_target + (availability *availability = NULL, symtab_node *ref = NULL); + + /* Return node that alias is aliasing. */ + inline varpool_node *get_alias_target (void); + + /* Output one variable, if necessary. Return whether we output it. */ + bool assemble_decl (void); + + /* For variables in named sections make sure get_variable_section + is called before we switch to those sections. Then section + conflicts between read-only and read-only requiring relocations + sections can be resolved. */ + void finalize_named_section_flags (void); + + /* Call callback on varpool symbol and aliases associated to varpool symbol. + When INCLUDE_OVERWRITABLE is false, overwritable aliases and thunks are + skipped. */ + bool call_for_symbol_and_aliases (bool (*callback) (varpool_node *, void *), + void *data, + bool include_overwritable); + + /* Return true when variable should be considered externally visible. */ + bool externally_visible_p (void); + + /* Return true when all references to variable must be visible + in ipa_ref_list. + i.e. if the variable is not externally visible or not used in some magic + way (asm statement or such). + The magic uses are all summarized in force_output flag. */ + inline bool all_refs_explicit_p (); + + /* Return true when variable can be removed from variable pool + if all direct calls are eliminated. */ + inline bool can_remove_if_no_refs_p (void); + + /* Add the variable DECL to the varpool. 
+ Unlike finalize_decl function is intended to be used + by middle end and allows insertion of new variable at arbitrary point + of compilation. */ + static void add (tree decl); + + /* Return varpool node for given symbol and check it is a function. */ + static inline varpool_node *get (const_tree decl); + + /* Mark DECL as finalized. By finalizing the declaration, frontend instruct + the middle end to output the variable to asm file, if needed or externally + visible. */ + static void finalize_decl (tree decl); + + /* Attempt to mark ALIAS as an alias to DECL. Return TRUE if successful. + Extra name aliases are output whenever DECL is output. */ + static varpool_node * create_extra_name_alias (tree alias, tree decl); + + /* Attempt to mark ALIAS as an alias to DECL. Return TRUE if successful. + Extra name aliases are output whenever DECL is output. */ + static varpool_node * create_alias (tree, tree); + + /* Dump the variable pool to F. */ + static void dump_varpool (FILE *f); + + /* Dump the variable pool to stderr. */ + static void DEBUG_FUNCTION debug_varpool (void); + + /* Allocate new callgraph node and insert it into basic data structures. */ + static varpool_node *create_empty (void); + + /* Return varpool node assigned to DECL. Create new one when needed. */ + static varpool_node *get_create (tree decl); + + /* Given an assembler name, lookup node. */ + static varpool_node *get_for_asmname (tree asmname); + + /* Set when variable is scheduled to be assembled. */ + unsigned output : 1; + + /* Set if the variable is dynamically initialized, except for + function local statics. */ + unsigned dynamically_initialized : 1; + + ENUM_BITFIELD(tls_model) tls_model : 3; + + /* Set if the variable is known to be used by single function only. + This is computed by ipa_single_use pass and used by late optimizations + in places where optimization would be valid for local static variable + if we did not do any inter-procedural code movement. 
*/ + unsigned used_by_single_function : 1; + +private: + /* Assemble thunks and aliases associated to varpool node. */ + void assemble_aliases (void); + + /* Worker for call_for_node_and_aliases. */ + bool call_for_symbol_and_aliases_1 (bool (*callback) (varpool_node *, void *), + void *data, + bool include_overwritable); +}; + +/* Every top level asm statement is put into a asm_node. */ + +struct GTY(()) asm_node { + /* Next asm node. */ + asm_node *next; + /* String for this asm node. */ + tree asm_str; + /* Ordering of all cgraph nodes. */ + int order; +}; + +/* Report whether or not THIS symtab node is a function, aka cgraph_node. */ + +template <> +template <> +inline bool +is_a_helper ::test (symtab_node *p) +{ + return p && p->type == SYMTAB_FUNCTION; +} + +/* Report whether or not THIS symtab node is a variable, aka varpool_node. */ + +template <> +template <> +inline bool +is_a_helper ::test (symtab_node *p) +{ + return p && p->type == SYMTAB_VARIABLE; +} + +typedef void (*cgraph_edge_hook)(cgraph_edge *, void *); +typedef void (*cgraph_node_hook)(cgraph_node *, void *); +typedef void (*varpool_node_hook)(varpool_node *, void *); +typedef void (*cgraph_2edge_hook)(cgraph_edge *, cgraph_edge *, void *); +typedef void (*cgraph_2node_hook)(cgraph_node *, cgraph_node *, void *); + +struct cgraph_edge_hook_list; +struct cgraph_node_hook_list; +struct varpool_node_hook_list; +struct cgraph_2edge_hook_list; +struct cgraph_2node_hook_list; + +/* Map from a symbol to initialization/finalization priorities. */ +struct GTY(()) symbol_priority_map { + priority_type init; + priority_type fini; +}; + +enum symtab_state +{ + /* Frontend is parsing and finalizing functions. */ + PARSING, + /* Callgraph is being constructed. It is safe to add new functions. */ + CONSTRUCTION, + /* Callgraph is being streamed-in at LTO time. */ + LTO_STREAMING, + /* Callgraph is built and early IPA passes are being run. 
*/ + IPA, + /* Callgraph is built and all functions are transformed to SSA form. */ + IPA_SSA, + /* All inline decisions are done; it is now possible to remove extern inline + functions and virtual call targets. */ + IPA_SSA_AFTER_INLINING, + /* Functions are now ordered and being passed to RTL expanders. */ + EXPANSION, + /* All cgraph expansion is done. */ + FINISHED +}; + +struct asmname_hasher : ggc_ptr_hash +{ + typedef const_tree compare_type; + + static hashval_t hash (symtab_node *n); + static bool equal (symtab_node *n, const_tree t); +}; + +/* Core summaries maintained about symbols. */ + +struct thunk_info; +template class function_summary; +typedef function_summary thunk_summary; + +struct clone_info; +template class function_summary; +typedef function_summary clone_summary; + +class GTY((tag ("SYMTAB"))) symbol_table +{ +public: + friend struct symtab_node; + friend struct cgraph_node; + friend struct cgraph_edge; + + symbol_table (): + cgraph_count (0), cgraph_max_uid (1), cgraph_max_summary_id (0), + edges_count (0), edges_max_uid (1), edges_max_summary_id (0), + cgraph_released_summary_ids (), edge_released_summary_ids (), + nodes (NULL), asmnodes (NULL), asm_last_node (NULL), + order (0), max_unit (0), global_info_ready (false), state (PARSING), + function_flags_ready (false), cpp_implicit_aliases_done (false), + section_hash (NULL), assembler_name_hash (NULL), init_priority_hash (NULL), + dump_file (NULL), ipa_clones_dump_file (NULL), cloned_nodes (), + m_thunks (NULL), m_clones (NULL), + m_first_edge_removal_hook (NULL), m_first_cgraph_removal_hook (NULL), + m_first_edge_duplicated_hook (NULL), m_first_cgraph_duplicated_hook (NULL), + m_first_cgraph_insertion_hook (NULL), m_first_varpool_insertion_hook (NULL), + m_first_varpool_removal_hook (NULL) + { + } + + /* Initialize callgraph dump file. */ + void initialize (void); + + /* Register a top-level asm statement ASM_STR. 
*/ + inline asm_node *finalize_toplevel_asm (tree asm_str); + + /* Analyze the whole compilation unit once it is parsed completely. */ + void finalize_compilation_unit (void); + + /* C++ frontend produce same body aliases all over the place, even before PCH + gets streamed out. It relies on us linking the aliases with their function + in order to do the fixups, but ipa-ref is not PCH safe. Consequently we + first produce aliases without links, but once C++ FE is sure it won't + stream PCH we build the links via this function. */ + void process_same_body_aliases (void); + + /* Perform simple optimizations based on callgraph. */ + void compile (void); + + /* Process CGRAPH_NEW_FUNCTIONS and perform actions necessary to add these + functions into callgraph in a way so they look like ordinary reachable + functions inserted into callgraph already at construction time. */ + void process_new_functions (void); + + /* Register a symbol NODE. */ + inline void register_symbol (symtab_node *node); + + inline void + clear_asm_symbols (void) + { + asmnodes = NULL; + asm_last_node = NULL; + } + + /* Perform reachability analysis and reclaim all unreachable nodes. */ + bool remove_unreachable_nodes (FILE *file); + + /* Optimization of function bodies might've rendered some variables as + unnecessary so we want to avoid these from being compiled. Re-do + reachability starting from variables that are either externally visible + or was referred from the asm output routines. */ + void remove_unreferenced_decls (void); + + /* Unregister a symbol NODE. */ + inline void unregister (symtab_node *node); + + /* Allocate new callgraph node and insert it into basic data structures. */ + cgraph_node *create_empty (void); + + /* Release a callgraph NODE. */ + void release_symbol (cgraph_node *node); + + /* Output all variables enqueued to be assembled. */ + bool output_variables (void); + + /* Weakrefs may be associated to external decls and thus not output + at expansion time. 
Emit all necessary aliases. */ + void output_weakrefs (void); + + /* Return first static symbol with definition. */ + inline symtab_node *first_symbol (void); + + /* Return first assembler symbol. */ + inline asm_node * + first_asm_symbol (void) + { + return asmnodes; + } + + /* Return first static symbol with definition. */ + inline symtab_node *first_defined_symbol (void); + + /* Return first variable. */ + inline varpool_node *first_variable (void); + + /* Return next variable after NODE. */ + inline varpool_node *next_variable (varpool_node *node); + + /* Return first static variable with initializer. */ + inline varpool_node *first_static_initializer (void); + + /* Return next static variable with initializer after NODE. */ + inline varpool_node *next_static_initializer (varpool_node *node); + + /* Return first static variable with definition. */ + inline varpool_node *first_defined_variable (void); + + /* Return next static variable with definition after NODE. */ + inline varpool_node *next_defined_variable (varpool_node *node); + + /* Return first function with body defined. */ + inline cgraph_node *first_defined_function (void); + + /* Return next function with body defined after NODE. */ + inline cgraph_node *next_defined_function (cgraph_node *node); + + /* Return first function. */ + inline cgraph_node *first_function (void); + + /* Return next function. */ + inline cgraph_node *next_function (cgraph_node *node); + + /* Return first function with body defined. */ + cgraph_node *first_function_with_gimple_body (void); + + /* Return next reachable static variable with initializer after NODE. */ + inline cgraph_node *next_function_with_gimple_body (cgraph_node *node); + + /* Register HOOK to be called with DATA on each removed edge. */ + cgraph_edge_hook_list *add_edge_removal_hook (cgraph_edge_hook hook, + void *data); + + /* Remove ENTRY from the list of hooks called on removing edges. 
*/ + void remove_edge_removal_hook (cgraph_edge_hook_list *entry); + + /* Register HOOK to be called with DATA on each removed node. */ + cgraph_node_hook_list *add_cgraph_removal_hook (cgraph_node_hook hook, + void *data); + + /* Remove ENTRY from the list of hooks called on removing nodes. */ + void remove_cgraph_removal_hook (cgraph_node_hook_list *entry); + + /* Register HOOK to be called with DATA on each removed node. */ + varpool_node_hook_list *add_varpool_removal_hook (varpool_node_hook hook, + void *data); + + /* Remove ENTRY from the list of hooks called on removing nodes. */ + void remove_varpool_removal_hook (varpool_node_hook_list *entry); + + /* Register HOOK to be called with DATA on each inserted node. */ + cgraph_node_hook_list *add_cgraph_insertion_hook (cgraph_node_hook hook, + void *data); + + /* Remove ENTRY from the list of hooks called on inserted nodes. */ + void remove_cgraph_insertion_hook (cgraph_node_hook_list *entry); + + /* Register HOOK to be called with DATA on each inserted node. */ + varpool_node_hook_list *add_varpool_insertion_hook (varpool_node_hook hook, + void *data); + + /* Remove ENTRY from the list of hooks called on inserted nodes. */ + void remove_varpool_insertion_hook (varpool_node_hook_list *entry); + + /* Register HOOK to be called with DATA on each duplicated edge. */ + cgraph_2edge_hook_list *add_edge_duplication_hook (cgraph_2edge_hook hook, + void *data); + /* Remove ENTRY from the list of hooks called on duplicating edges. */ + void remove_edge_duplication_hook (cgraph_2edge_hook_list *entry); + + /* Register HOOK to be called with DATA on each duplicated node. */ + cgraph_2node_hook_list *add_cgraph_duplication_hook (cgraph_2node_hook hook, + void *data); + + /* Remove ENTRY from the list of hooks called on duplicating nodes. */ + void remove_cgraph_duplication_hook (cgraph_2node_hook_list *entry); + + /* Call all edge removal hooks. 
*/ + void call_edge_removal_hooks (cgraph_edge *e); + + /* Call all node insertion hooks. */ + void call_cgraph_insertion_hooks (cgraph_node *node); + + /* Call all node removal hooks. */ + void call_cgraph_removal_hooks (cgraph_node *node); + + /* Call all node duplication hooks. */ + void call_cgraph_duplication_hooks (cgraph_node *node, cgraph_node *node2); + + /* Call all edge duplication hooks. */ + void call_edge_duplication_hooks (cgraph_edge *cs1, cgraph_edge *cs2); + + /* Call all node removal hooks. */ + void call_varpool_removal_hooks (varpool_node *node); + + /* Call all node insertion hooks. */ + void call_varpool_insertion_hooks (varpool_node *node); + + /* Arrange node to be first in its entry of assembler_name_hash. */ + void symtab_prevail_in_asm_name_hash (symtab_node *node); + + /* Initialize asm name hash unless. */ + void symtab_initialize_asm_name_hash (void); + + /* Set the DECL_ASSEMBLER_NAME and update symtab hashtables. */ + void change_decl_assembler_name (tree decl, tree name); + + /* Dump symbol table to F. */ + void dump (FILE *f); + + /* Dump symbol table to F in graphviz format. */ + void dump_graphviz (FILE *f); + + /* Dump symbol table to stderr. */ + void DEBUG_FUNCTION debug (void); + + /* Assign a new summary ID for the callgraph NODE. */ + inline int assign_summary_id (cgraph_node *node) + { + if (!cgraph_released_summary_ids.is_empty ()) + node->m_summary_id = cgraph_released_summary_ids.pop (); + else + node->m_summary_id = cgraph_max_summary_id++; + + return node->m_summary_id; + } + + /* Assign a new summary ID for the callgraph EDGE. */ + inline int assign_summary_id (cgraph_edge *edge) + { + if (!edge_released_summary_ids.is_empty ()) + edge->m_summary_id = edge_released_summary_ids.pop (); + else + edge->m_summary_id = edges_max_summary_id++; + + return edge->m_summary_id; + } + + /* Return true if assembler names NAME1 and NAME2 leads to the same symbol + name. 
*/ + static bool assembler_names_equal_p (const char *name1, const char *name2); + + int cgraph_count; + int cgraph_max_uid; + int cgraph_max_summary_id; + + int edges_count; + int edges_max_uid; + int edges_max_summary_id; + + /* Vector of released summary IDS for cgraph nodes. */ + vec GTY ((skip)) cgraph_released_summary_ids; + + /* Vector of released summary IDS for cgraph nodes. */ + vec GTY ((skip)) edge_released_summary_ids; + + /* Return symbol used to separate symbol name from suffix. */ + static char symbol_suffix_separator (); + + symtab_node* GTY(()) nodes; + asm_node* GTY(()) asmnodes; + asm_node* GTY(()) asm_last_node; + + /* The order index of the next symtab node to be created. This is + used so that we can sort the cgraph nodes in order by when we saw + them, to support -fno-toplevel-reorder. */ + int order; + + /* Maximal unit ID used. */ + int max_unit; + + /* Set when whole unit has been analyzed so we can access global info. */ + bool global_info_ready; + /* What state callgraph is in right now. */ + enum symtab_state state; + /* Set when the cgraph is fully build and the basic flags are computed. */ + bool function_flags_ready; + + bool cpp_implicit_aliases_done; + + /* Hash table used to hold sections. */ + hash_table *GTY(()) section_hash; + + /* Hash table used to convert assembler names into nodes. */ + hash_table *assembler_name_hash; + + /* Hash table used to hold init priorities. */ + hash_map *init_priority_hash; + + FILE* GTY ((skip)) dump_file; + + FILE* GTY ((skip)) ipa_clones_dump_file; + + hash_set GTY ((skip)) cloned_nodes; + + /* Thunk annotations. */ + thunk_summary *m_thunks; + + /* Virtual clone annotations. */ + clone_summary *m_clones; + +private: + /* Allocate a cgraph_edge structure and fill it with data according to the + parameters of which only CALLEE can be NULL (when creating an indirect + call edge). CLONING_P should be set if properties that are copied from an + original edge should not be calculated. 
*/ + cgraph_edge *create_edge (cgraph_node *caller, cgraph_node *callee, + gcall *call_stmt, profile_count count, + bool indir_unknown_callee, bool cloning_p); + + /* Put the edge onto the free list. */ + void free_edge (cgraph_edge *e); + + /* Insert NODE to assembler name hash. */ + void insert_to_assembler_name_hash (symtab_node *node, bool with_clones); + + /* Remove NODE from assembler name hash. */ + void unlink_from_assembler_name_hash (symtab_node *node, bool with_clones); + + /* Hash asmnames ignoring the user specified marks. */ + static hashval_t decl_assembler_name_hash (const_tree asmname); + + /* Compare ASMNAME with the DECL_ASSEMBLER_NAME of DECL. */ + static bool decl_assembler_name_equal (tree decl, const_tree asmname); + + friend struct asmname_hasher; + + /* List of hooks triggered when an edge is removed. */ + cgraph_edge_hook_list * GTY((skip)) m_first_edge_removal_hook; + /* List of hooks trigger_red when a cgraph node is removed. */ + cgraph_node_hook_list * GTY((skip)) m_first_cgraph_removal_hook; + /* List of hooks triggered when an edge is duplicated. */ + cgraph_2edge_hook_list * GTY((skip)) m_first_edge_duplicated_hook; + /* List of hooks triggered when a node is duplicated. */ + cgraph_2node_hook_list * GTY((skip)) m_first_cgraph_duplicated_hook; + /* List of hooks triggered when an function is inserted. */ + cgraph_node_hook_list * GTY((skip)) m_first_cgraph_insertion_hook; + /* List of hooks triggered when an variable is inserted. */ + varpool_node_hook_list * GTY((skip)) m_first_varpool_insertion_hook; + /* List of hooks triggered when a node is removed. 
*/ + varpool_node_hook_list * GTY((skip)) m_first_varpool_removal_hook; +}; + +extern GTY(()) symbol_table *symtab; + +extern vec cgraph_new_nodes; + +inline hashval_t +asmname_hasher::hash (symtab_node *n) +{ + return symbol_table::decl_assembler_name_hash + (DECL_ASSEMBLER_NAME (n->decl)); +} + +inline bool +asmname_hasher::equal (symtab_node *n, const_tree t) +{ + return symbol_table::decl_assembler_name_equal (n->decl, t); +} + +/* In cgraph.cc */ +void cgraph_cc_finalize (void); +void release_function_body (tree); +cgraph_indirect_call_info *cgraph_allocate_init_indirect_info (void); + +void cgraph_update_edges_for_call_stmt (gimple *, tree, gimple *); +bool cgraph_function_possibly_inlined_p (tree); + +const char* cgraph_inline_failed_string (cgraph_inline_failed_t); +cgraph_inline_failed_type_t cgraph_inline_failed_type (cgraph_inline_failed_t); + +/* In cgraphunit.cc */ +void cgraphunit_cc_finalize (void); +int tp_first_run_node_cmp (const void *pa, const void *pb); + +/* In symtab-thunks.cc */ +void symtab_thunks_cc_finalize (void); + +/* Initialize datastructures so DECL is a function in lowered gimple form. + IN_SSA is true if the gimple is in SSA. 
*/ +basic_block init_lowered_empty_function (tree, bool, profile_count); + +tree thunk_adjust (gimple_stmt_iterator *, tree, bool, HOST_WIDE_INT, tree, + HOST_WIDE_INT); +/* In cgraphclones.cc */ + +tree clone_function_name_numbered (const char *name, const char *suffix); +tree clone_function_name_numbered (tree decl, const char *suffix); +tree clone_function_name (const char *name, const char *suffix, + unsigned long number); +tree clone_function_name (tree decl, const char *suffix, + unsigned long number); +tree clone_function_name (tree decl, const char *suffix); + +void tree_function_versioning (tree, tree, vec *, + ipa_param_adjustments *, + bool, bitmap, basic_block); + +void dump_callgraph_transformation (const cgraph_node *original, + const cgraph_node *clone, + const char *suffix); +/* In cgraphbuild.cc */ +int compute_call_stmt_bb_frequency (tree, basic_block bb); +void record_references_in_initializer (tree, bool); + +/* In ipa.cc */ +void cgraph_build_static_cdtor (char which, tree body, int priority); +bool ipa_discover_variable_flags (void); + +/* In varpool.cc */ +tree ctor_for_folding (tree); + +/* In ipa-inline-analysis.cc */ +void initialize_inline_failed (struct cgraph_edge *); +bool speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining); + +/* Return true when the symbol is real symbol, i.e. it is not inline clone + or abstract function kept for debug info purposes only. */ +inline bool +symtab_node::real_symbol_p (void) +{ + cgraph_node *cnode; + + if (DECL_ABSTRACT_P (decl)) + return false; + if (transparent_alias && definition) + return false; + if (!is_a (this)) + return true; + cnode = dyn_cast (this); + if (cnode->inlined_to) + return false; + return true; +} + +/* Return true if DECL should have entry in symbol table if used. + Those are functions and static & external variables. 
*/ + +static inline bool +decl_in_symtab_p (const_tree decl) +{ + return (TREE_CODE (decl) == FUNCTION_DECL + || (TREE_CODE (decl) == VAR_DECL + && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))); +} + +inline bool +symtab_node::in_same_comdat_group_p (symtab_node *target) +{ + symtab_node *source = this; + + if (cgraph_node *cn = dyn_cast (target)) + { + if (cn->inlined_to) + source = cn->inlined_to; + } + if (cgraph_node *cn = dyn_cast (target)) + { + if (cn->inlined_to) + target = cn->inlined_to; + } + + return source->get_comdat_group () == target->get_comdat_group (); +} + +/* Return node that alias is aliasing. */ + +inline symtab_node * +symtab_node::get_alias_target (void) +{ + ipa_ref *ref = NULL; + iterate_reference (0, ref); + gcc_checking_assert (ref->use == IPA_REF_ALIAS); + return ref->referred; +} + +/* Return the DECL (or identifier) that alias is aliasing. Unlike the above, + this works whether or not the alias has been analyzed already. */ + +inline tree +symtab_node::get_alias_target_tree () +{ + if (alias_target) + return alias_target; + return get_alias_target ()->decl; +} + +/* Return next reachable static symbol with initializer after the node. */ + +inline symtab_node * +symtab_node::next_defined_symbol (void) +{ + symtab_node *node1 = next; + + for (; node1; node1 = node1->next) + if (node1->definition) + return node1; + + return NULL; +} + +/* Iterates I-th reference in the list, REF is also set. */ + +inline ipa_ref * +symtab_node::iterate_reference (unsigned i, ipa_ref *&ref) +{ + ref_list.references.iterate (i, &ref); + + return ref; +} + +/* Iterates I-th referring item in the list, REF is also set. */ + +inline ipa_ref * +symtab_node::iterate_referring (unsigned i, ipa_ref *&ref) +{ + ref_list.referring.iterate (i, &ref); + + return ref; +} + +/* Iterates I-th referring alias item in the list, REF is also set. 
*/ + +inline ipa_ref * +symtab_node::iterate_direct_aliases (unsigned i, ipa_ref *&ref) +{ + ref_list.referring.iterate (i, &ref); + + if (ref && ref->use != IPA_REF_ALIAS) + return NULL; + + return ref; +} + +/* Return true if list contains an alias. */ + +inline bool +symtab_node::has_aliases_p (void) +{ + ipa_ref *ref = NULL; + + return (iterate_direct_aliases (0, ref) != NULL); +} + +/* Return true when RESOLUTION indicate that linker will use + the symbol from non-LTO object files. */ + +inline bool +resolution_used_from_other_file_p (enum ld_plugin_symbol_resolution resolution) +{ + return (resolution == LDPR_PREVAILING_DEF + || resolution == LDPR_PREEMPTED_REG + || resolution == LDPR_RESOLVED_EXEC + || resolution == LDPR_RESOLVED_DYN); +} + +/* Return true when symtab_node is known to be used from other (non-LTO) + object file. Known only when doing LTO via linker plugin. */ + +inline bool +symtab_node::used_from_object_file_p (void) +{ + if (!TREE_PUBLIC (decl) || DECL_EXTERNAL (decl)) + return false; + if (resolution_used_from_other_file_p (resolution)) + return true; + return false; +} + +/* Return varpool node for given symbol and check it is a function. */ + +inline varpool_node * +varpool_node::get (const_tree decl) +{ + gcc_checking_assert (TREE_CODE (decl) == VAR_DECL); + return dyn_cast (symtab_node::get (decl)); +} + +/* Register a symbol NODE. */ + +inline void +symbol_table::register_symbol (symtab_node *node) +{ + node->next = nodes; + node->previous = NULL; + + if (nodes) + nodes->previous = node; + nodes = node; + + node->order = order++; +} + +/* Register a top-level asm statement ASM_STR. */ + +asm_node * +symbol_table::finalize_toplevel_asm (tree asm_str) +{ + asm_node *node; + + node = ggc_cleared_alloc (); + node->asm_str = asm_str; + node->order = order++; + node->next = NULL; + + if (asmnodes == NULL) + asmnodes = node; + else + asm_last_node->next = node; + + asm_last_node = node; + return node; +} + +/* Unregister a symbol NODE. 
*/ +inline void +symbol_table::unregister (symtab_node *node) +{ + if (node->previous) + node->previous->next = node->next; + else + nodes = node->next; + + if (node->next) + node->next->previous = node->previous; + + node->next = NULL; + node->previous = NULL; +} + +/* Release a callgraph NODE with UID and put in to the list of free nodes. */ + +inline void +symbol_table::release_symbol (cgraph_node *node) +{ + cgraph_count--; + if (node->m_summary_id != -1) + cgraph_released_summary_ids.safe_push (node->m_summary_id); + ggc_free (node); +} + +/* Return first static symbol with definition. */ +inline symtab_node * +symbol_table::first_symbol (void) +{ + return nodes; +} + +/* Walk all symbols. */ +#define FOR_EACH_SYMBOL(node) \ + for ((node) = symtab->first_symbol (); (node); (node) = (node)->next) + +/* Return first static symbol with definition. */ +inline symtab_node * +symbol_table::first_defined_symbol (void) +{ + symtab_node *node; + + for (node = nodes; node; node = node->next) + if (node->definition) + return node; + + return NULL; +} + +/* Walk all symbols with definitions in current unit. */ +#define FOR_EACH_DEFINED_SYMBOL(node) \ + for ((node) = symtab->first_defined_symbol (); (node); \ + (node) = node->next_defined_symbol ()) + +/* Return first variable. */ +inline varpool_node * +symbol_table::first_variable (void) +{ + symtab_node *node; + for (node = nodes; node; node = node->next) + if (varpool_node *vnode = dyn_cast (node)) + return vnode; + return NULL; +} + +/* Return next variable after NODE. */ +inline varpool_node * +symbol_table::next_variable (varpool_node *node) +{ + symtab_node *node1 = node->next; + for (; node1; node1 = node1->next) + if (varpool_node *vnode1 = dyn_cast (node1)) + return vnode1; + return NULL; +} +/* Walk all variables. */ +#define FOR_EACH_VARIABLE(node) \ + for ((node) = symtab->first_variable (); \ + (node); \ + (node) = symtab->next_variable ((node))) + +/* Return first static variable with initializer. 
*/ +inline varpool_node * +symbol_table::first_static_initializer (void) +{ + symtab_node *node; + for (node = nodes; node; node = node->next) + { + varpool_node *vnode = dyn_cast (node); + if (vnode && DECL_INITIAL (node->decl)) + return vnode; + } + return NULL; +} + +/* Return next static variable with initializer after NODE. */ +inline varpool_node * +symbol_table::next_static_initializer (varpool_node *node) +{ + symtab_node *node1 = node->next; + for (; node1; node1 = node1->next) + { + varpool_node *vnode1 = dyn_cast (node1); + if (vnode1 && DECL_INITIAL (node1->decl)) + return vnode1; + } + return NULL; +} + +/* Walk all static variables with initializer set. */ +#define FOR_EACH_STATIC_INITIALIZER(node) \ + for ((node) = symtab->first_static_initializer (); (node); \ + (node) = symtab->next_static_initializer (node)) + +/* Return first static variable with definition. */ +inline varpool_node * +symbol_table::first_defined_variable (void) +{ + symtab_node *node; + for (node = nodes; node; node = node->next) + { + varpool_node *vnode = dyn_cast (node); + if (vnode && vnode->definition) + return vnode; + } + return NULL; +} + +/* Return next static variable with definition after NODE. */ +inline varpool_node * +symbol_table::next_defined_variable (varpool_node *node) +{ + symtab_node *node1 = node->next; + for (; node1; node1 = node1->next) + { + varpool_node *vnode1 = dyn_cast (node1); + if (vnode1 && vnode1->definition) + return vnode1; + } + return NULL; +} +/* Walk all variables with definitions in current unit. */ +#define FOR_EACH_DEFINED_VARIABLE(node) \ + for ((node) = symtab->first_defined_variable (); (node); \ + (node) = symtab->next_defined_variable (node)) + +/* Return first function with body defined. 
*/ +inline cgraph_node * +symbol_table::first_defined_function (void) +{ + symtab_node *node; + for (node = nodes; node; node = node->next) + { + cgraph_node *cn = dyn_cast (node); + if (cn && cn->definition) + return cn; + } + return NULL; +} + +/* Return next function with body defined after NODE. */ +inline cgraph_node * +symbol_table::next_defined_function (cgraph_node *node) +{ + symtab_node *node1 = node->next; + for (; node1; node1 = node1->next) + { + cgraph_node *cn1 = dyn_cast (node1); + if (cn1 && cn1->definition) + return cn1; + } + return NULL; +} + +/* Walk all functions with body defined. */ +#define FOR_EACH_DEFINED_FUNCTION(node) \ + for ((node) = symtab->first_defined_function (); (node); \ + (node) = symtab->next_defined_function ((node))) + +/* Return first function. */ +inline cgraph_node * +symbol_table::first_function (void) +{ + symtab_node *node; + for (node = nodes; node; node = node->next) + if (cgraph_node *cn = dyn_cast (node)) + return cn; + return NULL; +} + +/* Return next function. */ +inline cgraph_node * +symbol_table::next_function (cgraph_node *node) +{ + symtab_node *node1 = node->next; + for (; node1; node1 = node1->next) + if (cgraph_node *cn1 = dyn_cast (node1)) + return cn1; + return NULL; +} + +/* Return first function with body defined. */ +inline cgraph_node * +symbol_table::first_function_with_gimple_body (void) +{ + symtab_node *node; + for (node = nodes; node; node = node->next) + { + cgraph_node *cn = dyn_cast (node); + if (cn && cn->has_gimple_body_p ()) + return cn; + } + return NULL; +} + +/* Return next reachable static variable with initializer after NODE. */ +inline cgraph_node * +symbol_table::next_function_with_gimple_body (cgraph_node *node) +{ + symtab_node *node1 = node->next; + for (; node1; node1 = node1->next) + { + cgraph_node *cn1 = dyn_cast (node1); + if (cn1 && cn1->has_gimple_body_p ()) + return cn1; + } + return NULL; +} + +/* Walk all functions. 
*/ +#define FOR_EACH_FUNCTION(node) \ + for ((node) = symtab->first_function (); (node); \ + (node) = symtab->next_function ((node))) + +/* Return true when callgraph node is a function with Gimple body defined + in current unit. Functions can also be define externally or they + can be thunks with no Gimple representation. + + Note that at WPA stage, the function body may not be present in memory. */ + +inline bool +cgraph_node::has_gimple_body_p (void) +{ + return definition && !thunk && !alias; +} + +/* Walk all functions with body defined. */ +#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \ + for ((node) = symtab->first_function_with_gimple_body (); (node); \ + (node) = symtab->next_function_with_gimple_body (node)) + +/* Uniquize all constants that appear in memory. + Each constant in memory thus far output is recorded + in `const_desc_table'. */ + +struct GTY((for_user)) constant_descriptor_tree { + /* A MEM for the constant. */ + rtx rtl; + + /* The value of the constant. */ + tree value; + + /* Hash of value. Computing the hash from value each time + hashfn is called can't work properly, as that means recursive + use of the hash table during hash table expansion. */ + hashval_t hash; +}; + +/* Return true when function is only called directly or it has alias. + i.e. it is not externally visible, address was not taken and + it is not used in any other non-standard way. */ + +inline bool +cgraph_node::only_called_directly_or_aliased_p (void) +{ + gcc_assert (!inlined_to); + return (!force_output && !address_taken + && !ifunc_resolver + && !used_from_other_partition + && !DECL_VIRTUAL_P (decl) + && !DECL_STATIC_CONSTRUCTOR (decl) + && !DECL_STATIC_DESTRUCTOR (decl) + && !used_from_object_file_p () + && !externally_visible); +} + +/* Return true when function can be removed from callgraph + if all direct calls are eliminated. 
*/ + +inline bool +cgraph_node::can_remove_if_no_direct_calls_and_refs_p (void) +{ + gcc_checking_assert (!inlined_to); + /* Extern inlines can always go, we will use the external definition. */ + if (DECL_EXTERNAL (decl)) + return true; + /* When function is needed, we cannot remove it. */ + if (force_output || used_from_other_partition) + return false; + if (DECL_STATIC_CONSTRUCTOR (decl) + || DECL_STATIC_DESTRUCTOR (decl)) + return false; + /* Only COMDAT functions can be removed if externally visible. */ + if (externally_visible + && ((!DECL_COMDAT (decl) || ifunc_resolver) + || forced_by_abi + || used_from_object_file_p ())) + return false; + return true; +} + +/* Verify cgraph, if consistency checking is enabled. */ + +inline void +cgraph_node::checking_verify_cgraph_nodes (void) +{ + if (flag_checking) + cgraph_node::verify_cgraph_nodes (); +} + +/* Return true when variable can be removed from variable pool + if all direct calls are eliminated. */ + +inline bool +varpool_node::can_remove_if_no_refs_p (void) +{ + if (DECL_EXTERNAL (decl)) + return true; + return (!force_output && !used_from_other_partition + && ((DECL_COMDAT (decl) + && !forced_by_abi + && !used_from_object_file_p ()) + || !externally_visible + || DECL_HAS_VALUE_EXPR_P (decl))); +} + +/* Return true when all references to variable must be visible in ipa_ref_list. + i.e. if the variable is not externally visible or not used in some magic + way (asm statement or such). + The magic uses are all summarized in force_output flag. */ + +inline bool +varpool_node::all_refs_explicit_p () +{ + return (definition + && !externally_visible + && !used_from_other_partition + && !force_output); +} + +struct tree_descriptor_hasher : ggc_ptr_hash +{ + static hashval_t hash (constant_descriptor_tree *); + static bool equal (constant_descriptor_tree *, constant_descriptor_tree *); +}; + +/* Constant pool accessor function. */ +hash_table *constant_pool_htab (void); + +/* Return node that alias is aliasing. 
*/ + +inline cgraph_node * +cgraph_node::get_alias_target (void) +{ + return dyn_cast (symtab_node::get_alias_target ()); +} + +/* Return node that alias is aliasing. */ + +inline varpool_node * +varpool_node::get_alias_target (void) +{ + return dyn_cast (symtab_node::get_alias_target ()); +} + +/* Walk the alias chain to return the symbol NODE is alias of. + If NODE is not an alias, return NODE. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + +inline symtab_node * +symtab_node::ultimate_alias_target (enum availability *availability, + symtab_node *ref) +{ + if (!alias) + { + if (availability) + *availability = get_availability (ref); + return this; + } + + return ultimate_alias_target_1 (availability, ref); +} + +/* Given function symbol, walk the alias chain to return the function node + is alias of. Do not walk through thunks. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. */ + +inline cgraph_node * +cgraph_node::ultimate_alias_target (enum availability *availability, + symtab_node *ref) +{ + cgraph_node *n = dyn_cast + (symtab_node::ultimate_alias_target (availability, ref)); + if (!n && availability) + *availability = AVAIL_NOT_AVAILABLE; + return n; +} + +/* For given variable pool node, walk the alias chain to return the function + the variable is alias of. Do not walk through thunks. + When AVAILABILITY is non-NULL, get minimal availability in the chain. + When REF is non-NULL, assume that reference happens in symbol REF + when determining the availability. 
*/ + +inline varpool_node * +varpool_node::ultimate_alias_target (availability *availability, + symtab_node *ref) +{ + varpool_node *n = dyn_cast + (symtab_node::ultimate_alias_target (availability, ref)); + + if (!n && availability) + *availability = AVAIL_NOT_AVAILABLE; + return n; +} + +/* Set callee N of call graph edge and add it to the corresponding set of + callers. */ + +inline void +cgraph_edge::set_callee (cgraph_node *n) +{ + prev_caller = NULL; + if (n->callers) + n->callers->prev_caller = this; + next_caller = n->callers; + n->callers = this; + callee = n; +} + +/* Return true when the edge represents a direct recursion. */ + +inline bool +cgraph_edge::recursive_p (void) +{ + cgraph_node *c = callee->ultimate_alias_target (); + if (caller->inlined_to) + return caller->inlined_to->decl == c->decl; + else + return caller->decl == c->decl; +} + +/* Remove the edge from the list of the callers of the callee. */ + +inline void +cgraph_edge::remove_callee (void) +{ + gcc_assert (!indirect_unknown_callee); + if (prev_caller) + prev_caller->next_caller = next_caller; + if (next_caller) + next_caller->prev_caller = prev_caller; + if (!prev_caller) + callee->callers = next_caller; +} + +/* Return true if call must bind to current definition. */ + +inline bool +cgraph_edge::binds_to_current_def_p () +{ + if (callee) + return callee->binds_to_current_def_p (caller); + else + return false; +} + +/* Expected frequency of executions within the function. + When set to CGRAPH_FREQ_BASE, the edge is expected to be called once + per function call. The range is 0 to CGRAPH_FREQ_MAX. */ + +inline int +cgraph_edge::frequency () +{ + return count.to_cgraph_frequency (caller->inlined_to + ? caller->inlined_to->count + : caller->count); +} + + +/* Return true if the TM_CLONE bit is set for a given FNDECL. 
*/ +static inline bool +decl_is_tm_clone (const_tree fndecl) +{ + cgraph_node *n = cgraph_node::get (fndecl); + if (n) + return n->tm_clone; + return false; +} + +/* Likewise indicate that a node is needed, i.e. reachable via some + external means. */ + +inline void +cgraph_node::mark_force_output (void) +{ + force_output = 1; + gcc_checking_assert (!inlined_to); +} + +/* Return true if function should be optimized for size. */ + +inline enum optimize_size_level +cgraph_node::optimize_for_size_p (void) +{ + if (opt_for_fn (decl, optimize_size)) + return OPTIMIZE_SIZE_MAX; + if (count == profile_count::zero ()) + return OPTIMIZE_SIZE_MAX; + if (frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED) + return OPTIMIZE_SIZE_BALANCED; + else + return OPTIMIZE_SIZE_NO; +} + +/* Return symtab_node for NODE or create one if it is not present + in symtab. */ + +inline symtab_node * +symtab_node::get_create (tree node) +{ + if (TREE_CODE (node) == VAR_DECL) + return varpool_node::get_create (node); + else + return cgraph_node::get_create (node); +} + +/* Return availability of NODE when referenced from REF. */ + +inline enum availability +symtab_node::get_availability (symtab_node *ref) +{ + if (is_a (this)) + return dyn_cast (this)->get_availability (ref); + else + return dyn_cast (this)->get_availability (ref); +} + +/* Call callback on symtab node and aliases associated to this node. + When INCLUDE_OVERWRITABLE is false, overwritable symbols are skipped. */ + +inline bool +symtab_node::call_for_symbol_and_aliases (bool (*callback) (symtab_node *, + void *), + void *data, + bool include_overwritable) +{ + if (include_overwritable + || get_availability () > AVAIL_INTERPOSABLE) + { + if (callback (this, data)) + return true; + } + if (has_aliases_p ()) + return call_for_symbol_and_aliases_1 (callback, data, include_overwritable); + return false; +} + +/* Call callback on function and aliases associated to the function. 
+ When INCLUDE_OVERWRITABLE is false, overwritable symbols are + skipped. */ + +inline bool +cgraph_node::call_for_symbol_and_aliases (bool (*callback) (cgraph_node *, + void *), + void *data, + bool include_overwritable) +{ + if (include_overwritable + || get_availability () > AVAIL_INTERPOSABLE) + { + if (callback (this, data)) + return true; + } + if (has_aliases_p ()) + return call_for_symbol_and_aliases_1 (callback, data, include_overwritable); + return false; +} + +/* Call callback on varpool symbol and aliases associated to varpool symbol. + When INCLUDE_OVERWRITABLE is false, overwritable symbols are + skipped. */ + +inline bool +varpool_node::call_for_symbol_and_aliases (bool (*callback) (varpool_node *, + void *), + void *data, + bool include_overwritable) +{ + if (include_overwritable + || get_availability () > AVAIL_INTERPOSABLE) + { + if (callback (this, data)) + return true; + } + if (has_aliases_p ()) + return call_for_symbol_and_aliases_1 (callback, data, include_overwritable); + return false; +} + +/* Return true if reference may be used in address compare. */ + +inline bool +ipa_ref::address_matters_p () +{ + if (use != IPA_REF_ADDR) + return false; + /* Addresses taken from virtual tables are never compared. */ + if (is_a (referring) + && DECL_VIRTUAL_P (referring->decl)) + return false; + return referred->address_can_be_compared_p (); +} + +/* Build polymorphic call context for indirect call E. */ + +inline +ipa_polymorphic_call_context::ipa_polymorphic_call_context (cgraph_edge *e) +{ + gcc_checking_assert (e->indirect_info->polymorphic); + *this = e->indirect_info->context; +} + +/* Build empty "I know nothing" context. */ + +inline +ipa_polymorphic_call_context::ipa_polymorphic_call_context () +{ + clear_speculation (); + clear_outer_type (); + invalid = false; +} + +/* Make context non-speculative. 
*/ + +inline void +ipa_polymorphic_call_context::clear_speculation () +{ + speculative_outer_type = NULL; + speculative_offset = 0; + speculative_maybe_derived_type = false; +} + +/* Produce context specifying all derived types of OTR_TYPE. If OTR_TYPE is + NULL, the context is set to dummy "I know nothing" setting. */ + +inline void +ipa_polymorphic_call_context::clear_outer_type (tree otr_type) +{ + outer_type = otr_type ? TYPE_MAIN_VARIANT (otr_type) : NULL; + offset = 0; + maybe_derived_type = true; + maybe_in_construction = true; + dynamic = true; +} + +/* Adjust all offsets in contexts by OFF bits. */ + +inline void +ipa_polymorphic_call_context::offset_by (HOST_WIDE_INT off) +{ + if (outer_type) + offset += off; + if (speculative_outer_type) + speculative_offset += off; +} + +/* Return TRUE if context is fully useless. */ + +inline bool +ipa_polymorphic_call_context::useless_p () const +{ + return (!outer_type && !speculative_outer_type); +} + +/* When using fprintf (or similar), problems can arise with + transient generated strings. Many string-generation APIs + only support one result being alive at once (e.g. by + returning a pointer to a statically-allocated buffer). + + If there is more than one generated string within one + fprintf call: the first string gets evicted or overwritten + by the second, before fprintf is fully evaluated. + See e.g. PR/53136. + + This function provides a workaround for this, by providing + a simple way to create copies of these transient strings, + without the need to have explicit cleanup: + + fprintf (dumpfile, "string 1: %s string 2:%s\n", + xstrdup_for_dump (EXPR_1), + xstrdup_for_dump (EXPR_2)); + + This is actually a simple wrapper around ggc_strdup, but + the name documents the intent. We require that no GC can occur + within the fprintf call. 
*/ + +static inline const char * +xstrdup_for_dump (const char *transient_str) +{ + return ggc_strdup (transient_str); +} + +/* During LTO stream-in this predicate can be used to check whether node + in question prevails in the linking to save some memory usage. */ +inline bool +symtab_node::prevailing_p (void) +{ + return definition && ((!TREE_PUBLIC (decl) && !DECL_EXTERNAL (decl)) + || previous_sharing_asm_name == NULL); +} + +extern GTY(()) symbol_table *saved_symtab; + +#if CHECKING_P + +namespace selftest { + +/* An RAII-style class for use in selftests for temporarily using a different + symbol_table, so that such tests can be isolated from each other. */ + +class symbol_table_test +{ + public: + /* Constructor. Override "symtab". */ + symbol_table_test (); + + /* Destructor. Restore the saved_symtab. */ + ~symbol_table_test (); +}; + +} // namespace selftest + +#endif /* CHECKING_P */ + +#endif /* GCC_CGRAPH_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cif-code.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cif-code.def new file mode 100644 index 0000000..ea604e1 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cif-code.def @@ -0,0 +1,144 @@ +/* This file contains the definitions of the cgraph_inline_failed_t + enums used in GCC. + + Copyright (C) 2008-2022 Free Software Foundation, Inc. + Contributed by Doug Kwan + +This file is part of GCC. + +GCC is free software you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC see the file COPYING3. If not see +. 
*/ + +/* The format of this file is + DEFCIFCODE(code, string). + + Where symbol is the enumeration name without the ``''. + The argument STRING is a explain the failure. Except for OK, + which is a NULL pointer. */ + +/* Inlining successful. This must be the first code. */ +DEFCIFCODE(OK, CIF_FINAL_NORMAL, NULL) + +/* Inlining failed for an unspecified reason. */ +DEFCIFCODE(UNSPECIFIED, CIF_FINAL_ERROR, "") + +/* Function has not be considered for inlining. This is the code for + functions that have not been rejected for inlining yet. */ +DEFCIFCODE(FUNCTION_NOT_CONSIDERED, CIF_FINAL_NORMAL, + N_("function not considered for inlining")) + +/* Caller is compiled with optimizations disabled. */ +DEFCIFCODE(FUNCTION_NOT_OPTIMIZED, CIF_FINAL_ERROR, + N_("caller is not optimized")) + +/* Inlining failed owing to unavailable function body. */ +DEFCIFCODE(BODY_NOT_AVAILABLE, CIF_FINAL_ERROR, + N_("function body not available")) + +/* Extern inline function that has been redefined. */ +DEFCIFCODE(REDEFINED_EXTERN_INLINE, CIF_FINAL_ERROR, + N_("redefined extern inline functions are not considered for " + "inlining")) + +/* Function is not inlinable. */ +DEFCIFCODE(FUNCTION_NOT_INLINABLE, CIF_FINAL_ERROR, + N_("function not inlinable")) + +/* Function is overwritable. */ +DEFCIFCODE(OVERWRITABLE, CIF_FINAL_ERROR, + N_("function body can be overwritten at link time")) + +/* Function is not an inlining candidate. */ +DEFCIFCODE(FUNCTION_NOT_INLINE_CANDIDATE, CIF_FINAL_NORMAL, + N_("function not inline candidate")) + +/* Inlining failed because of various limit parameters. 
*/ +DEFCIFCODE(LARGE_FUNCTION_GROWTH_LIMIT, CIF_FINAL_NORMAL, + N_("--param large-function-growth limit reached")) +DEFCIFCODE(LARGE_STACK_FRAME_GROWTH_LIMIT, CIF_FINAL_NORMAL, + N_("--param large-stack-frame-growth limit reached")) +DEFCIFCODE(MAX_INLINE_INSNS_SINGLE_LIMIT, CIF_FINAL_NORMAL, + N_("--param max-inline-insns-single limit reached")) +DEFCIFCODE(MAX_INLINE_INSNS_AUTO_LIMIT, CIF_FINAL_NORMAL, + N_("--param max-inline-insns-auto limit reached")) +DEFCIFCODE(INLINE_UNIT_GROWTH_LIMIT, CIF_FINAL_NORMAL, + N_("--param inline-unit-growth limit reached")) + +/* Recursive inlining. */ +DEFCIFCODE(RECURSIVE_INLINING, CIF_FINAL_NORMAL, + N_("recursive inlining")) + +/* Call is unlikely. */ +DEFCIFCODE(UNLIKELY_CALL, CIF_FINAL_NORMAL, + N_("call is unlikely and code size would grow")) + +/* Call is considered never executed. */ +DEFCIFCODE(NEVER_CALL, CIF_FINAL_NORMAL, + N_("call is considered never executed and code size would grow")) + +/* Function is not declared as inline. */ +DEFCIFCODE(NOT_DECLARED_INLINED, CIF_FINAL_NORMAL, + N_("function not declared inline and code size would grow")) + +/* Caller and callee disagree on the arguments. */ +DEFCIFCODE(LTO_MISMATCHED_DECLARATIONS, CIF_FINAL_ERROR, + N_("mismatched declarations during linktime optimization")) + +/* Caller is variadic thunk. */ +DEFCIFCODE(VARIADIC_THUNK, CIF_FINAL_ERROR, + N_("variadic thunk call")) + +/* Call was originally indirect. */ +DEFCIFCODE(ORIGINALLY_INDIRECT_CALL, CIF_FINAL_NORMAL, + N_("originally indirect function call not considered for inlining")) + +/* Ths edge represents an indirect edge with a yet-undetermined callee . */ +DEFCIFCODE(INDIRECT_UNKNOWN_CALL, CIF_FINAL_NORMAL, + N_("indirect function call with a yet undetermined callee")) + +/* We can't inline different EH personalities together. 
*/ +DEFCIFCODE(EH_PERSONALITY, CIF_FINAL_ERROR, + N_("exception handling personality mismatch")) + +/* We can't inline if the callee can throw non-call exceptions but the + caller cannot. */ +DEFCIFCODE(NON_CALL_EXCEPTIONS, CIF_FINAL_ERROR, + N_("non-call exception handling mismatch")) + +/* We can't inline because of mismatched target specific options. */ +DEFCIFCODE(TARGET_OPTION_MISMATCH, CIF_FINAL_ERROR, + N_("target specific option mismatch")) + +/* We can't inline because of mismatched optimization levels. */ +DEFCIFCODE(OPTIMIZATION_MISMATCH, CIF_FINAL_ERROR, + N_("optimization level attribute mismatch")) + +/* We can't inline because the callee refers to comdat-local symbols. */ +DEFCIFCODE(USES_COMDAT_LOCAL, CIF_FINAL_NORMAL, + N_("callee refers to comdat-local symbols")) + +/* We can't inline because of mismatched caller/callee + sanitizer attributes. */ +DEFCIFCODE(SANITIZE_ATTRIBUTE_MISMATCH, CIF_FINAL_ERROR, + N_("sanitizer function attribute mismatch")) + +/* We can't inline because the user requests only static functions + but the function has external linkage for live patching purpose. */ +DEFCIFCODE(EXTERN_LIVE_ONLY_STATIC, CIF_FINAL_ERROR, + N_("function has external linkage when the user requests only" + " inlining static for live patching")) + +/* We proved that the call is unreachable. */ +DEFCIFCODE(UNREACHABLE, CIF_FINAL_ERROR, + N_("unreachable")) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect-utils.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect-utils.h new file mode 100644 index 0000000..98cacea --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect-utils.h @@ -0,0 +1,51 @@ +/* Utility functions used by tools like collect2 and lto-wrapper. + Copyright (C) 2009-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_COLLECT_UTILS_H +#define GCC_COLLECT_UTILS_H + +/* Provided in collect-utils.cc. */ +extern void notice (const char *, ...) + __attribute__ ((format (printf, 1, 2))); +extern void fatal_signal (int); +extern void setup_signals (void); + +extern struct pex_obj *collect_execute (const char *, char **, + const char *, const char *, + int, bool, const char *); +extern int collect_wait (const char *, struct pex_obj *); +extern void do_wait (const char *, struct pex_obj *); +extern void fork_execute (const char *, char **, bool, const char *); +extern void utils_cleanup (bool); + + +extern bool debug; +extern bool verbose; +extern bool save_temps; +extern const char *dumppfx; + +/* Provided by the tool itself. */ + +/* The name of the tool, printed in error messages. */ +extern const char tool_name[]; +/* Called by utils_cleanup. */ +extern void tool_cleanup (bool); +extern void maybe_unlink (const char *); + +#endif /* GCC_COLLECT_UTILS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect2-aix.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect2-aix.h new file mode 100644 index 0000000..7efbca5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect2-aix.h @@ -0,0 +1,306 @@ +/* AIX cross support for collect2. + Copyright (C) 2009-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_COLLECT2_AIX_H +#define GCC_COLLECT2_AIX_H +/* collect2-aix.cc requires mmap support. It should otherwise be + fairly portable. */ +#if defined(CROSS_DIRECTORY_STRUCTURE) \ + && defined(TARGET_AIX_VERSION) \ + && HAVE_MMAP + +#define CROSS_AIX_SUPPORT 1 + +/* ------------------------------------------------------------------------- + Definitions adapted from bfd. (Fairly heavily adapted in some cases.) + ------------------------------------------------------------------------- */ + +/* Compatibility types for bfd. */ +typedef unsigned HOST_WIDE_INT bfd_vma; + +/* The size of an archive's fl_magic field. */ +#define FL_MAGIC_SIZE 8 + +/* The expected contents of fl_magic for big archives. */ +#define FL_MAGIC_BIG_AR "\012" + +/* The size of each offset string in the header of a big archive. */ +#define AR_BIG_OFFSET_SIZE 20 + +/* The format of the file header in a "big" XCOFF archive. */ +struct external_big_ar_filehdr +{ + /* Magic string. */ + char fl_magic[FL_MAGIC_SIZE]; + + /* Offset of the member table (decimal ASCII string). */ + char fl_memoff[AR_BIG_OFFSET_SIZE]; + + /* Offset of the global symbol table for 32-bit objects (decimal ASCII + string). */ + char fl_symoff[AR_BIG_OFFSET_SIZE]; + + /* Offset of the global symbol table for 64-bit objects (decimal ASCII + string). 
*/ + char fl_symoff64[AR_BIG_OFFSET_SIZE]; + + /* Offset of the first member in the archive (decimal ASCII string). */ + char fl_firstmemoff[AR_BIG_OFFSET_SIZE]; + + /* Offset of the last member in the archive (decimal ASCII string). */ + char fl_lastmemoff[AR_BIG_OFFSET_SIZE]; + + /* Offset of the first member on the free list (decimal ASCII + string). */ + char fl_freeoff[AR_BIG_OFFSET_SIZE]; +}; + +/* Each archive name is followed by this many bytes of magic string. */ +#define SXCOFFARFMAG 2 + +/* The format of a member header in a "big" XCOFF archive. */ +struct external_big_ar_member +{ + /* File size not including the header (decimal ASCII string). */ + char ar_size[AR_BIG_OFFSET_SIZE]; + + /* File offset of next archive member (decimal ASCII string). */ + char ar_nextoff[AR_BIG_OFFSET_SIZE]; + + /* File offset of previous archive member (decimal ASCII string). */ + char ar_prevoff[AR_BIG_OFFSET_SIZE]; + + /* File mtime (decimal ASCII string). */ + char ar_date[12]; + + /* File UID (decimal ASCII string). */ + char ar_uid[12]; + + /* File GID (decimal ASCII string). */ + char ar_gid[12]; + + /* File mode (octal ASCII string). */ + char ar_mode[12]; + + /* Length of file name (decimal ASCII string). */ + char ar_namlen[4]; + + /* This structure is followed by the file name. The length of the + name is given in the namlen field. If the length of the name is + odd, the name is followed by a null byte. The name and optional + null byte are followed by XCOFFARFMAG, which is not included in + namlen. The contents of the archive member follow; the number of + bytes is given in the size field. */ +}; + +/* The known values of f_magic in an XCOFF file header. */ +#define U802WRMAGIC 0730 /* Writeable text segments. */ +#define U802ROMAGIC 0735 /* Readonly sharable text segments. */ +#define U802TOCMAGIC 0737 /* Readonly text segments and TOC. */ +#define U803XTOCMAGIC 0757 /* Aix 4.3 64-bit XCOFF. */ +#define U64_TOCMAGIC 0767 /* AIX 5+ 64-bit XCOFF. 
*/ + +/* The number of bytes in an XCOFF file's f_magic field. */ +#define F_MAGIC_SIZE 2 + +/* The format of a 32-bit XCOFF file header. */ +struct external_filehdr_32 +{ + /* The magic number. */ + char f_magic[F_MAGIC_SIZE]; + + /* The number of sections. */ + char f_nscns[2]; + + /* Time & date stamp. */ + char f_timdat[4]; + + /* The offset of the symbol table from the start of the file. */ + char f_symptr[4]; + + /* The number of entries in the symbol table. */ + char f_nsyms[4]; + + /* The size of the auxiliary header. */ + char f_opthdr[2]; + + /* Flags. */ + char f_flags[2]; +}; + +/* The format of a 64-bit XCOFF file header. */ +struct external_filehdr_64 +{ + /* The magic number. */ + char f_magic[F_MAGIC_SIZE]; + + /* The number of sections. */ + char f_nscns[2]; + + /* Time & date stamp. */ + char f_timdat[4]; + + /* The offset of the symbol table from the start of the file. */ + char f_symptr[8]; + + /* The size of the auxiliary header. */ + char f_opthdr[2]; + + /* Flags. */ + char f_flags[2]; + + /* The number of entries in the symbol table. */ + char f_nsyms[4]; +}; + +/* An internal representation of the XCOFF file header. */ +struct internal_filehdr +{ + unsigned short f_magic; + unsigned short f_nscns; + long f_timdat; + bfd_vma f_symptr; + long f_nsyms; + unsigned short f_opthdr; + unsigned short f_flags; +}; + +/* Symbol classes have their names in the debug section if this flag + is set. */ +#define DBXMASK 0x80 + +/* The format of an XCOFF symbol-table entry. */ +struct external_syment +{ + union { + struct { + union { + /* The name of the symbol. There is an implicit null character + after the end of the array. */ + char n_name[8]; + struct { + /* If n_zeroes is zero, n_offset is the offset the name from + the start of the string table. */ + char n_zeroes[4]; + char n_offset[4]; + } u; + } u; + + /* The symbol's value. */ + char n_value[4]; + } xcoff32; + struct { + /* The symbol's value. 
*/ + char n_value[8]; + + /* The offset of the symbol from the start of the string table. */ + char n_offset[4]; + } xcoff64; + } u; + + /* The number of the section to which this symbol belongs. */ + char n_scnum[2]; + + /* The type of symbol. (It can be interpreted as an n_lang + and an n_cpu byte, but we don't care about that here.) */ + char n_type[2]; + + /* The class of symbol (a C_* value). */ + char n_sclass[1]; + + /* The number of auxiliary symbols attached to this entry. */ + char n_numaux[1]; +}; + +/* Definitions required by collect2. */ +#define C_EXT 2 + +#define F_SHROBJ 0x2000 +#define F_LOADONLY 0x4000 + +#define N_UNDEF ((short) 0) +#define N_TMASK 060 +#define N_BTSHFT 4 + +#define DT_NON 0 +#define DT_FCN 2 + +/* ------------------------------------------------------------------------- + Local code. + ------------------------------------------------------------------------- */ + +/* An internal representation of an XCOFF symbol-table entry, + which is associated with the API-defined SYMENT type. */ +struct internal_syment +{ + char n_name[9]; + unsigned int n_zeroes; + bfd_vma n_offset; + bfd_vma n_value; + short n_scnum; + unsigned short n_flags; + unsigned short n_type; + unsigned char n_sclass; + unsigned char n_numaux; +}; +typedef struct internal_syment SYMENT; + +/* The internal representation of the API-defined LDFILE type. */ +struct internal_ldfile +{ + /* The file handle for the associated file, or -1 if it hasn't been + opened yet. */ + int fd; + + /* The start of the current XCOFF object, if one has been mapped + into memory. Null otherwise. */ + char *object; + + /* The offset of OBJECT from the start of the containing page. */ + size_t page_offset; + + /* The size of the file pointed to by OBJECT. Valid iff OFFSET + is nonnull. */ + size_t object_size; + + /* The offset of the next member in an archive after OBJECT, + or -1 if this isn't an archive. Valid iff OFFSET is nonnull. 
*/ + off_t next_member; + + /* The parsed version of the XCOFF file header. */ + struct internal_filehdr filehdr; +}; +typedef struct internal_ldfile LDFILE; + +/* The API allows the file header to be directly accessed via this macro. */ +#define HEADER(FILE) ((FILE)->filehdr) + +/* API-defined return codes. SUCCESS must be > 0 and FAILURE must be <= 0. */ +#define SUCCESS 1 +#define FAILURE 0 + +/* API-defined functions. */ +extern LDFILE *ldopen (char *, LDFILE *); +extern char *ldgetname (LDFILE *, SYMENT *); +extern int ldtbread (LDFILE *, long, SYMENT *); +extern int ldclose (LDFILE *); + +#endif + +#endif /* GCC_COLLECT2_AIX_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect2.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect2.h new file mode 100644 index 0000000..5aadf03 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/collect2.h @@ -0,0 +1,39 @@ +/* Header file for collect routines. + Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_COLLECT2_H +#define GCC_COLLECT2_H + +extern struct pex_obj *collect_execute (const char *, char **, const char *, + const char *, int flags); + +extern int collect_wait (const char *, struct pex_obj *); + +extern int file_exists (const char *); + +extern const char *c_file_name; +extern struct obstack temporary_obstack; +extern char *temporary_firstobj; +extern bool may_unlink_output_file; + +extern void notice_translated (const char *, ...) ATTRIBUTE_PRINTF_1; +extern void notice (const char *, ...) ATTRIBUTE_PRINTF_1; + +extern bool at_file_supplied; +#endif /* ! GCC_COLLECT2_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/color-macros.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/color-macros.h new file mode 100644 index 0000000..d144b34 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/color-macros.h @@ -0,0 +1,108 @@ +/* Terminal color manipulation macros. + Copyright (C) 2005-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_COLOR_MACROS_H +#define GCC_COLOR_MACROS_H + +/* Select Graphic Rendition (SGR, "\33[...m") strings. */ +/* Also Erase in Line (EL) to Right ("\33[K") by default. */ +/* Why have EL to Right after SGR? 
+ -- The behavior of line-wrapping when at the bottom of the + terminal screen and at the end of the current line is often + such that a new line is introduced, entirely cleared with + the current background color which may be different from the + default one (see the boolean back_color_erase terminfo(5) + capability), thus scrolling the display by one line. + The end of this new line will stay in this background color + even after reverting to the default background color with + "\33[m', unless it is explicitly cleared again with "\33[K" + (which is the behavior the user would instinctively expect + from the whole thing). There may be some unavoidable + background-color flicker at the end of this new line because + of this (when timing with the monitor's redraw is just right). + -- The behavior of HT (tab, "\t") is usually the same as that of + Cursor Forward Tabulation (CHT) with a default parameter + of 1 ("\33[I"), i.e., it performs pure movement to the next + tab stop, without any clearing of either content or screen + attributes (including background color); try + printf 'asdfqwerzxcv\rASDF\tZXCV\n' + in a bash(1) shell to demonstrate this. This is not what the + user would instinctively expect of HT (but is ok for CHT). + The instinctive behavior would include clearing the terminal + cells that are skipped over by HT with blank cells in the + current screen attributes, including background color; + the boolean dest_tabs_magic_smso terminfo(5) capability + indicates this saner behavior for HT, but only some rare + terminals have it (although it also indicates a special + glitch with standout mode in the Teleray terminal for which + it was initially introduced). 
The remedy is to add "\33K" + after each SGR sequence, be it START (to fix the behavior + of any HT after that before another SGR) or END (to fix the + behavior of an HT in default background color that would + follow a line-wrapping at the bottom of the screen in another + background color, and to complement doing it after START). + Piping GCC's output through a pager such as less(1) avoids + any HT problems since the pager performs tab expansion. + + Generic disadvantages of this remedy are: + -- Some very rare terminals might support SGR but not EL (nobody + will use "gcc -fdiagnostics-color" on a terminal that does not + support SGR in the first place). + -- Having these extra control sequences might somewhat complicate + the task of any program trying to parse "gcc -fdiagnostics-color" + output in order to extract structuring information from it. + A specific disadvantage to doing it after SGR START is: + -- Even more possible background color flicker (when timing + with the monitor's redraw is just right), even when not at the + bottom of the screen. + There are no additional disadvantages specific to doing it after + SGR END. + + It would be impractical for GCC to become a full-fledged + terminal program linked against ncurses or the like, so it will + not detect terminfo(5) capabilities. 
*/ + +#define COLOR_SEPARATOR ";" +#define COLOR_NONE "00" +#define COLOR_BOLD "01" +#define COLOR_UNDERSCORE "04" +#define COLOR_BLINK "05" +#define COLOR_REVERSE "07" +#define COLOR_FG_BLACK "30" +#define COLOR_FG_RED "31" +#define COLOR_FG_GREEN "32" +#define COLOR_FG_YELLOW "33" +#define COLOR_FG_BLUE "34" +#define COLOR_FG_MAGENTA "35" +#define COLOR_FG_CYAN "36" +#define COLOR_FG_WHITE "37" +#define COLOR_BG_BLACK "40" +#define COLOR_BG_RED "41" +#define COLOR_BG_GREEN "42" +#define COLOR_BG_YELLOW "43" +#define COLOR_BG_BLUE "44" +#define COLOR_BG_MAGENTA "45" +#define COLOR_BG_CYAN "46" +#define COLOR_BG_WHITE "47" +#define SGR_START "\33[" +#define SGR_END "m\33[K" +#define SGR_SEQ(str) SGR_START str SGR_END +#define SGR_RESET SGR_SEQ("") + +#endif /* GCC_COLOR_MACROS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/common/config/i386/i386-cpuinfo.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/common/config/i386/i386-cpuinfo.h new file mode 100644 index 0000000..3f6d201 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/common/config/i386/i386-cpuinfo.h @@ -0,0 +1,255 @@ +/* Get CPU type and Features for x86 processors. + Copyright (C) 2012-2022 Free Software Foundation, Inc. + Contributed by Sriraman Tallam (tmsriram@google.com) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. 
+ +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* Processor Vendor and Models. */ + +enum processor_vendor +{ + VENDOR_INTEL = 1, + VENDOR_AMD, + VENDOR_OTHER, + VENDOR_CENTAUR, + VENDOR_CYRIX, + VENDOR_NSC, + BUILTIN_VENDOR_MAX = VENDOR_OTHER, + VENDOR_MAX +}; + +/* Any new types or subtypes have to be inserted at the end. */ + +enum processor_types +{ + INTEL_BONNELL = 1, + INTEL_CORE2, + INTEL_COREI7, + AMDFAM10H, + AMDFAM15H, + INTEL_SILVERMONT, + INTEL_KNL, + AMD_BTVER1, + AMD_BTVER2, + AMDFAM17H, + INTEL_KNM, + INTEL_GOLDMONT, + INTEL_GOLDMONT_PLUS, + INTEL_TREMONT, + AMDFAM19H, + CPU_TYPE_MAX, + BUILTIN_CPU_TYPE_MAX = CPU_TYPE_MAX +}; + +enum processor_subtypes +{ + INTEL_COREI7_NEHALEM = 1, + INTEL_COREI7_WESTMERE, + INTEL_COREI7_SANDYBRIDGE, + AMDFAM10H_BARCELONA, + AMDFAM10H_SHANGHAI, + AMDFAM10H_ISTANBUL, + AMDFAM15H_BDVER1, + AMDFAM15H_BDVER2, + AMDFAM15H_BDVER3, + AMDFAM15H_BDVER4, + AMDFAM17H_ZNVER1, + INTEL_COREI7_IVYBRIDGE, + INTEL_COREI7_HASWELL, + INTEL_COREI7_BROADWELL, + INTEL_COREI7_SKYLAKE, + INTEL_COREI7_SKYLAKE_AVX512, + INTEL_COREI7_CANNONLAKE, + INTEL_COREI7_ICELAKE_CLIENT, + INTEL_COREI7_ICELAKE_SERVER, + AMDFAM17H_ZNVER2, + INTEL_COREI7_CASCADELAKE, + INTEL_COREI7_TIGERLAKE, + INTEL_COREI7_COOPERLAKE, + INTEL_COREI7_SAPPHIRERAPIDS, + INTEL_COREI7_ALDERLAKE, + AMDFAM19H_ZNVER3, + INTEL_COREI7_ROCKETLAKE, + CPU_SUBTYPE_MAX +}; + +/* Priority of i386 features, greater value is higher priority. This is + used to decide the order in which function dispatch must happen. For + instance, a version specialized for SSE4.2 should be checked for dispatch + before a version for SSE3, as SSE4.2 implies SSE3. 
*/ +enum feature_priority +{ + P_NONE = 0, + P_MMX, + P_SSE, + P_SSE2, + P_X86_64_BASELINE, + P_SSE3, + P_SSSE3, + P_PROC_SSSE3, + P_SSE4_A, + P_PROC_SSE4_A, + P_SSE4_1, + P_SSE4_2, + P_PROC_SSE4_2, + P_POPCNT, + P_X86_64_V2, + P_AES, + P_PCLMUL, + P_AVX, + P_PROC_AVX, + P_BMI, + P_PROC_BMI, + P_FMA4, + P_XOP, + P_PROC_XOP, + P_FMA, + P_PROC_FMA, + P_BMI2, + P_AVX2, + P_PROC_AVX2, + P_X86_64_V3, + P_AVX512F, + P_PROC_AVX512F, + P_X86_64_V4, + P_PROC_DYNAMIC +}; + +/* ISA Features supported. New features have to be inserted at the end. */ + +enum processor_features +{ + FEATURE_CMOV = 0, + FEATURE_MMX, + FEATURE_POPCNT, + FEATURE_SSE, + FEATURE_SSE2, + FEATURE_SSE3, + FEATURE_SSSE3, + FEATURE_SSE4_1, + FEATURE_SSE4_2, + FEATURE_AVX, + FEATURE_AVX2, + FEATURE_SSE4_A, + FEATURE_FMA4, + FEATURE_XOP, + FEATURE_FMA, + FEATURE_AVX512F, + FEATURE_BMI, + FEATURE_BMI2, + FEATURE_AES, + FEATURE_PCLMUL, + FEATURE_AVX512VL, + FEATURE_AVX512BW, + FEATURE_AVX512DQ, + FEATURE_AVX512CD, + FEATURE_AVX512ER, + FEATURE_AVX512PF, + FEATURE_AVX512VBMI, + FEATURE_AVX512IFMA, + FEATURE_AVX5124VNNIW, + FEATURE_AVX5124FMAPS, + FEATURE_AVX512VPOPCNTDQ, + FEATURE_AVX512VBMI2, + FEATURE_GFNI, + FEATURE_VPCLMULQDQ, + FEATURE_AVX512VNNI, + FEATURE_AVX512BITALG, + FEATURE_AVX512BF16, + FEATURE_AVX512VP2INTERSECT, + FEATURE_3DNOW, + FEATURE_3DNOWP, + FEATURE_ADX, + FEATURE_ABM, + FEATURE_CLDEMOTE, + FEATURE_CLFLUSHOPT, + FEATURE_CLWB, + FEATURE_CLZERO, + FEATURE_CMPXCHG16B, + FEATURE_CMPXCHG8B, + FEATURE_ENQCMD, + FEATURE_F16C, + FEATURE_FSGSBASE, + FEATURE_FXSAVE, + FEATURE_HLE, + FEATURE_IBT, + FEATURE_LAHF_LM, + FEATURE_LM, + FEATURE_LWP, + FEATURE_LZCNT, + FEATURE_MOVBE, + FEATURE_MOVDIR64B, + FEATURE_MOVDIRI, + FEATURE_MWAITX, + FEATURE_OSXSAVE, + FEATURE_PCONFIG, + FEATURE_PKU, + FEATURE_PREFETCHWT1, + FEATURE_PRFCHW, + FEATURE_PTWRITE, + FEATURE_RDPID, + FEATURE_RDRND, + FEATURE_RDSEED, + FEATURE_RTM, + FEATURE_SERIALIZE, + FEATURE_SGX, + FEATURE_SHA, + FEATURE_SHSTK, + FEATURE_TBM, + 
FEATURE_TSXLDTRK, + FEATURE_VAES, + FEATURE_WAITPKG, + FEATURE_WBNOINVD, + FEATURE_XSAVE, + FEATURE_XSAVEC, + FEATURE_XSAVEOPT, + FEATURE_XSAVES, + FEATURE_AMX_TILE, + FEATURE_AMX_INT8, + FEATURE_AMX_BF16, + FEATURE_UINTR, + FEATURE_HRESET, + FEATURE_KL, + FEATURE_AESKLE, + FEATURE_WIDEKL, + FEATURE_AVXVNNI, + FEATURE_AVX512FP16, + FEATURE_X86_64_BASELINE, + FEATURE_X86_64_V2, + FEATURE_X86_64_V3, + FEATURE_X86_64_V4, + CPU_FEATURE_MAX +}; + +/* Size of __cpu_features2 array in libgcc/config/i386/cpuinfo.c. */ +#define SIZE_OF_CPU_FEATURES ((CPU_FEATURE_MAX - 1) / 32) + +/* These are the values for vendor types, cpu types and subtypes. Cpu + types and subtypes should be subtracted by the corresponding start + value. */ + +#define M_CPU_TYPE_START (BUILTIN_VENDOR_MAX) +#define M_CPU_SUBTYPE_START \ + (M_CPU_TYPE_START + BUILTIN_CPU_TYPE_MAX) +#define M_VENDOR(a) (a) +#define M_CPU_TYPE(a) (M_CPU_TYPE_START + a) +#define M_CPU_SUBTYPE(a) (M_CPU_SUBTYPE_START + a) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/conditions.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/conditions.h new file mode 100644 index 0000000..435d7ad --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/conditions.h @@ -0,0 +1,69 @@ +/* Definitions for condition code handling in final.cc and output routines. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_CONDITIONS_H +#define GCC_CONDITIONS_H + +/* These are the machine-independent flags: */ + +/* Set if the sign of the cc value is inverted: + output a following jump-if-less as a jump-if-greater, etc. */ +#define CC_REVERSED 1 + +/* This bit means that the current setting of the N bit is bogus + and conditional jumps should use the Z bit in its place. + This state obtains when an extraction of a signed single-bit field + or an arithmetic shift right of a byte by 7 bits + is turned into a btst, because btst does not set the N bit. */ +#define CC_NOT_POSITIVE 2 + +/* This bit means that the current setting of the N bit is bogus + and conditional jumps should pretend that the N bit is clear. + Used after extraction of an unsigned bit + or logical shift right of a byte by 7 bits is turned into a btst. + The btst does not alter the N bit, but the result of that shift + or extract is never negative. */ +#define CC_NOT_NEGATIVE 4 + +/* This bit means that the current setting of the overflow flag + is bogus and conditional jumps should pretend there is no overflow. */ +/* ??? Note that for most targets this macro is misnamed as it applies + to the carry flag, not the overflow flag. */ +#define CC_NO_OVERFLOW 010 + +/* This bit means that what ought to be in the Z bit + should be tested as the complement of the N bit. */ +#define CC_Z_IN_NOT_N 020 + +/* This bit means that what ought to be in the Z bit + should be tested as the N bit. */ +#define CC_Z_IN_N 040 + +/* Nonzero if we must invert the sense of the following branch, i.e. + change EQ to NE. This is not safe for IEEE floating point operations! + It is intended for use only when a combination of arithmetic + or logical insns can leave the condition codes set in a fortuitous + (though inverted) state. */ +#define CC_INVERTED 0100 + +/* Nonzero if we must convert signed condition operators to unsigned. + This is only used by machine description files. 
*/ +#define CC_NOT_SIGNED 0200 + +#endif /* GCC_CONDITIONS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config.h new file mode 100644 index 0000000..aa6dd6b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config.h @@ -0,0 +1,10 @@ +#ifndef GCC_CONFIG_H +#define GCC_CONFIG_H +#ifdef GENERATOR_FILE +#error config.h is for the host, not build, machine. +#endif +#include "auto-host.h" +#ifdef IN_GCC +# include "ansidecl.h" +#endif +#endif /* GCC_CONFIG_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/dbxelf.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/dbxelf.h new file mode 100644 index 0000000..4b90e95 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/dbxelf.h @@ -0,0 +1,68 @@ +/* Definitions needed when using stabs embedded in ELF sections. + Copyright (C) 1999-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. 
*/ + +/* This file may be included by any ELF target which wishes to + support -gstabs generating stabs in sections, as produced by gas + and understood by gdb. */ + +#ifndef GCC_DBX_ELF_H +#define GCC_DBX_ELF_H + +/* Output DBX (stabs) debugging information if doing -gstabs. */ + +#define DBX_DEBUGGING_INFO 1 + +/* Make LBRAC and RBRAC addresses relative to the start of the + function. The native Solaris stabs debugging format works this + way, gdb expects it, and it reduces the number of relocation + entries... */ + +#define DBX_BLOCKS_FUNCTION_RELATIVE 1 + +/* ... but, to make this work, functions must appear prior to line info. */ + +#define DBX_FUNCTION_FIRST + +/* When generating stabs debugging, use N_BINCL entries. */ + +#define DBX_USE_BINCL + +/* There is no limit to the length of stabs strings. */ + +#ifndef DBX_CONTIN_LENGTH +#define DBX_CONTIN_LENGTH 0 +#endif + +/* Like block addresses, stabs line numbers are relative to the + current function. */ + +#define DBX_LINES_FUNCTION_RELATIVE 1 + +/* Generate a blank trailing N_SO to mark the end of the .o file, since + we can't depend upon the linker to mark .o file boundaries with + embedded stabs. */ + +#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END + +#endif /* ! GCC_DBX_ELF_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/elfos.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/elfos.h new file mode 100644 index 0000000..acb3762 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/elfos.h @@ -0,0 +1,490 @@ +/* elfos.h -- operating system specific defines to be used when + targeting GCC for some generic ELF system + Copyright (C) 1991-2022 Free Software Foundation, Inc. + Based on svr4.h contributed by Ron Guilmette (rfg@netcom.com). + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#define TARGET_OBJFMT_CPP_BUILTINS() \ + do \ + { \ + builtin_define ("__ELF__"); \ + } \ + while (0) + +/* Define a symbol indicating that we are using elfos.h. + Some CPU specific configuration files use this. */ +#define USING_ELFOS_H + +/* The prefix to add to user-visible assembler symbols. + + For ELF systems the convention is *not* to prepend a leading + underscore onto user-level symbol names. */ + +#undef USER_LABEL_PREFIX +#define USER_LABEL_PREFIX "" + +/* The biggest alignment supported by ELF in bits. 32-bit ELF + supports section alignment up to (0x80000000 * 8), while + 64-bit ELF supports (0x8000000000000000 * 8). If this macro + is not defined, the default is the largest alignment supported + by 32-bit ELF and representable on a 32-bit host. Use this + macro to limit the alignment which can be specified using + the `__attribute__ ((aligned (N)))' construct. */ +#ifndef MAX_OFILE_ALIGNMENT +#define MAX_OFILE_ALIGNMENT (((unsigned int) 1 << 28) * 8) +#endif + +/* Use periods rather than dollar signs in special g++ assembler names. 
*/ + +#define NO_DOLLAR_IN_LABEL + +/* Writing `int' for a bit-field forces int alignment for the structure. */ + +#ifndef PCC_BITFIELD_TYPE_MATTERS +#define PCC_BITFIELD_TYPE_MATTERS 1 +#endif + +/* All ELF targets can support DWARF-2. */ + +#define DWARF2_DEBUGGING_INFO 1 + +/* All ELF targets can support CTF. */ + +#define CTF_DEBUGGING_INFO 1 + +/* All ELF targets can support BTF. */ + +#define BTF_DEBUGGING_INFO 1 + +/* The GNU tools operate better with dwarf2, and it is required by some + psABI's. Since we don't have any native tools to be compatible with, + default to dwarf2. */ + +#ifndef PREFERRED_DEBUGGING_TYPE +#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG +#endif + +/* All SVR4 targets use the ELF object file format. */ +#define OBJECT_FORMAT_ELF + + +/* Output #ident as a .ident. */ + +#undef TARGET_ASM_OUTPUT_IDENT +#define TARGET_ASM_OUTPUT_IDENT default_asm_output_ident_directive + +#undef SET_ASM_OP +#define SET_ASM_OP "\t.set\t" + +/* Most svr4 assemblers want a .file directive at the beginning of + their input file. */ +#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true + +/* This is how to allocate empty space in some section. The .zero + pseudo-op is used for this on most svr4 assemblers. */ + +#define SKIP_ASM_OP "\t.zero\t" + +#undef ASM_OUTPUT_SKIP +#define ASM_OUTPUT_SKIP(FILE, SIZE) \ + fprintf ((FILE), "%s" HOST_WIDE_INT_PRINT_UNSIGNED "\n",\ + SKIP_ASM_OP, (SIZE)) + +/* This is how to store into the string LABEL + the symbol_ref name of an internal numbered label where + PREFIX is the class of label and NUM is the number within the class. + This is suitable for output with `assemble_name'. + + For most svr4 systems, the convention is that any symbol which begins + with a period is not put into the linker symbol table by the assembler. 
*/ + +#undef ASM_GENERATE_INTERNAL_LABEL +#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \ + do \ + { \ + char *__p; \ + (LABEL)[0] = '*'; \ + (LABEL)[1] = '.'; \ + __p = stpcpy (&(LABEL)[2], PREFIX); \ + sprint_ul (__p, (unsigned long) (NUM)); \ + } \ + while (0) + +/* Output the label which precedes a jumptable. Note that for all svr4 + systems where we actually generate jumptables (which is to say every + svr4 target except i386, where we use casesi instead) we put the jump- + tables into the .rodata section and since other stuff could have been + put into the .rodata section prior to any given jumptable, we have to + make sure that the location counter for the .rodata section gets pro- + perly re-aligned prior to the actual beginning of the jump table. */ + +#undef ALIGN_ASM_OP +#define ALIGN_ASM_OP "\t.align\t" + +#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL +#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \ + ASM_OUTPUT_ALIGN ((FILE), 2) +#endif + +#undef ASM_OUTPUT_CASE_LABEL +#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \ + do \ + { \ + ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE); \ + (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \ + } \ + while (0) + +/* The standard SVR4 assembler seems to require that certain builtin + library routines (e.g. .udiv) be explicitly declared as .globl + in each assembly file where they are referenced. */ + +#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \ + (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0)) + +/* This says how to output assembler code to declare an + uninitialized external linkage data object. Under SVR4, + the linker seems to want the alignment of data objects + to depend on their types. We do exactly that here. 
*/ + +#define COMMON_ASM_OP "\t.comm\t" + +#undef ASM_OUTPUT_ALIGNED_COMMON +#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \ + do \ + { \ + fprintf ((FILE), "%s", COMMON_ASM_OP); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", \ + (SIZE), (ALIGN) / BITS_PER_UNIT); \ + } \ + while (0) + +/* This says how to output assembler code to declare an + uninitialized internal linkage data object. Under SVR4, + the linker seems to want the alignment of data objects + to depend on their types. We do exactly that here. */ + +#define LOCAL_ASM_OP "\t.local\t" + +#undef ASM_OUTPUT_ALIGNED_LOCAL +#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \ + do \ + { \ + fprintf ((FILE), "%s", LOCAL_ASM_OP); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), "\n"); \ + ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \ + } \ + while (0) + +/* This is the pseudo-op used to generate a contiguous sequence of byte + values from a double-quoted string WITHOUT HAVING A TERMINATING NUL + AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */ + +#undef ASCII_DATA_ASM_OP +#define ASCII_DATA_ASM_OP "\t.ascii\t" + +/* Support a read-only data section. */ +#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" + +/* On svr4, we *do* have support for the .init and .fini sections, and we + can put stuff in there to be executed before and after `main'. We let + crtstuff.c and other files know this by defining the following symbols. + The definitions say how to change sections to the .init and .fini + sections. This is the same for all known svr4 assemblers. */ + +#define INIT_SECTION_ASM_OP "\t.section\t.init" +#define FINI_SECTION_ASM_OP "\t.section\t.fini" + +/* Output assembly directive to move to the beginning of current section. 
*/ +#ifdef HAVE_GAS_SUBSECTION_ORDERING +# define ASM_SECTION_START_OP "\t.subsection\t-1" +# define ASM_OUTPUT_SECTION_START(FILE) \ + fprintf ((FILE), "%s\n", ASM_SECTION_START_OP) +#endif + +#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1) + +/* Switch into a generic section. */ +#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section + +#undef TARGET_ASM_SELECT_RTX_SECTION +#define TARGET_ASM_SELECT_RTX_SECTION default_elf_select_rtx_section +#undef TARGET_ASM_SELECT_SECTION +#define TARGET_ASM_SELECT_SECTION default_elf_select_section +#undef TARGET_HAVE_SWITCHABLE_BSS_SECTIONS +#define TARGET_HAVE_SWITCHABLE_BSS_SECTIONS true + +/* Define the strings used for the special svr4 .type and .size directives. + These strings generally do not vary from one system running svr4 to + another, but if a given system (e.g. m88k running svr) needs to use + different pseudo-op names for these, they may be overridden in the + file which includes this one. */ + +#define TYPE_ASM_OP "\t.type\t" +#define SIZE_ASM_OP "\t.size\t" + +/* This is how we tell the assembler that a symbol is weak. */ + +#define ASM_WEAKEN_LABEL(FILE, NAME) \ + do \ + { \ + fputs ("\t.weak\t", (FILE)); \ + assemble_name ((FILE), (NAME)); \ + fputc ('\n', (FILE)); \ + } \ + while (0) + +#define ASM_OUTPUT_SYMVER_DIRECTIVE(FILE, NAME, NAME2) \ + do \ + { \ + fputs ("\t.symver\t", (FILE)); \ + assemble_name ((FILE), (NAME)); \ + fputs (", ", (FILE)); \ + assemble_name ((FILE), (NAME2)); \ + fputc ('\n', (FILE)); \ + } \ + while (0) + +/* The following macro defines the format used to output the second + operand of the .type assembler directive. Different svr4 assemblers + expect various different forms for this operand. The one given here + is just a default. You may need to override it in your machine- + specific tm.h file (depending upon the particulars of your assembler). */ + +#define TYPE_OPERAND_FMT "@%s" + +/* Write the extra assembler code needed to declare a function's result. 
+ Most svr4 assemblers don't require any special declaration of the + result value, but there are exceptions. */ + +#ifndef ASM_DECLARE_RESULT +#define ASM_DECLARE_RESULT(FILE, RESULT) +#endif + +/* These macros generate the special .type and .size directives which + are used to set the corresponding fields of the linker symbol table + entries in an ELF object file under SVR4. These macros also output + the starting labels for the relevant functions/objects. */ + +/* Write the extra assembler code needed to declare a function properly. + Some svr4 assemblers need to also have something extra said about the + function's return value. We allow for that here. */ + +#ifndef ASM_DECLARE_FUNCTION_NAME +#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ + do \ + { \ + ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \ + ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \ + ASM_OUTPUT_FUNCTION_LABEL (FILE, NAME, DECL); \ + } \ + while (0) +#endif + +/* Write the extra assembler code needed to declare the name of a + cold function partition properly. Some svr4 assemblers need to also + have something extra said about the function's return value. We + allow for that here. */ + +#ifndef ASM_DECLARE_COLD_FUNCTION_NAME +#define ASM_DECLARE_COLD_FUNCTION_NAME(FILE, NAME, DECL) \ + do \ + { \ + ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \ + ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \ + ASM_OUTPUT_FUNCTION_LABEL (FILE, NAME, DECL); \ + } \ + while (0) +#endif + +/* Write the extra assembler code needed to declare an object properly. */ + +#ifdef HAVE_GAS_GNU_UNIQUE_OBJECT +#define USE_GNU_UNIQUE_OBJECT flag_gnu_unique +#else +#define USE_GNU_UNIQUE_OBJECT 0 +#endif + +#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \ + do \ + { \ + HOST_WIDE_INT size; \ + \ + /* For template static data member instantiations or \ + inline fn local statics and their guard variables, use \ + gnu_unique_object so that they will be combined even under \ + RTLD_LOCAL. 
Don't use gnu_unique_object for typeinfo, \ + vtables and other read-only artificial decls. */ \ + if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (DECL) \ + && (!DECL_ARTIFICIAL (DECL) || !TREE_READONLY (DECL))) \ + ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "gnu_unique_object"); \ + else \ + ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \ + \ + size_directive_output = 0; \ + if (!flag_inhibit_size_directive \ + && (DECL) && DECL_SIZE (DECL)) \ + { \ + size_directive_output = 1; \ + size = tree_to_uhwi (DECL_SIZE_UNIT (DECL)); \ + ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \ + } \ + \ + ASM_OUTPUT_LABEL (FILE, NAME); \ + } \ + while (0) + +/* Output the size directive for a decl in rest_of_decl_compilation + in the case where we did not do so before the initializer. + Once we find the error_mark_node, we know that the value of + size_directive_output was set + by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */ + +#undef ASM_FINISH_DECLARE_OBJECT +#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END)\ + do \ + { \ + const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ + HOST_WIDE_INT size; \ + \ + if (!flag_inhibit_size_directive \ + && DECL_SIZE (DECL) \ + && ! AT_END && TOP_LEVEL \ + && DECL_INITIAL (DECL) == error_mark_node \ + && !size_directive_output) \ + { \ + size_directive_output = 1; \ + size = tree_to_uhwi (DECL_SIZE_UNIT (DECL)); \ + ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \ + } \ + } \ + while (0) + +/* This is how to declare the size of a function. */ +#ifndef ASM_DECLARE_FUNCTION_SIZE +#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \ + do \ + { \ + if (!flag_inhibit_size_directive) \ + ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \ + } \ + while (0) +#endif + +/* This is how to declare the size of a cold function partition. 
*/ +#ifndef ASM_DECLARE_COLD_FUNCTION_SIZE +#define ASM_DECLARE_COLD_FUNCTION_SIZE(FILE, FNAME, DECL) \ + do \ + { \ + if (!flag_inhibit_size_directive) \ + ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \ + } \ + while (0) +#endif + +/* A table of bytes codes used by the ASM_OUTPUT_ASCII and + ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table + corresponds to a particular byte value [0..255]. For any + given byte value, if the value in the corresponding table + position is zero, the given character can be output directly. + If the table value is 1, the byte must be output as a \ooo + octal escape. If the tables value is anything else, then the + byte value should be output as a \ followed by the value + in the table. Note that we can use standard UN*X escape + sequences for many control characters, but we don't use + \a to represent BEL because some svr4 assemblers (e.g. on + the i386) don't know about that. Also, we don't use \v + since some versions of gas, such as 2.2 did not accept it. */ + +#define ELF_ASCII_ESCAPES \ +"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ +\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\ +\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\ +\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ +\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ +\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ +\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1" + +/* Some svr4 assemblers have a limit on the number of characters which + can appear in the operand of a .string directive. If your assembler + has such a limitation, you should define STRING_LIMIT to reflect that + limit. Note that at least some svr4 assemblers have a limit on the + actual number of bytes in the double-quoted string, and that they + count each character in an escape sequence as one byte. 
Thus, an + escape sequence like \377 would count as four bytes. + + If your target assembler doesn't support the .string directive, you + should define this to zero. +*/ + +#define ELF_STRING_LIMIT ((unsigned) 256) + +#define STRING_ASM_OP "\t.string\t" + +/* The routine used to output NUL terminated strings. We use a special + version of this for most svr4 targets because doing so makes the + generated assembly code more compact (and thus faster to assemble) + as well as more readable, especially for targets like the i386 + (where the only alternative is to output character sequences as + comma separated lists of numbers). */ + +#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \ + default_elf_asm_output_limited_string ((FILE), (STR)) + +/* The routine used to output sequences of byte values. We use a special + version of this for most svr4 targets because doing so makes the + generated assembly code more compact (and thus faster to assemble) + as well as more readable. Note that if we find subparts of the + character sequence which end with NUL (and which are shorter than + STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */ + +#undef ASM_OUTPUT_ASCII +#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \ + default_elf_asm_output_ascii ((FILE), (STR), (LENGTH)) + +/* Allow the use of the -frecord-gcc-switches switch via the + elf_record_gcc_switches function defined in varasm.cc. */ +#undef TARGET_ASM_RECORD_GCC_SWITCHES +#define TARGET_ASM_RECORD_GCC_SWITCHES elf_record_gcc_switches + +/* A C statement (sans semicolon) to output to the stdio stream STREAM + any text necessary for declaring the name of an external symbol + named NAME which is referenced in this compilation but not defined. + It is needed to properly support non-default visibility. 
*/ + +#ifndef ASM_OUTPUT_EXTERNAL +#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \ + default_elf_asm_output_external (FILE, DECL, NAME) +#endif + +#undef TARGET_LIBC_HAS_FUNCTION +#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function + +/* ELF support needed only by D front-end. */ + +#define TARGET_D_MINFO_SECTION "minfo" +#define TARGET_D_MINFO_START_NAME "__start_minfo" +#define TARGET_D_MINFO_END_NAME "__stop_minfo" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/glibc-stdint.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/glibc-stdint.h new file mode 100644 index 0000000..a3652d0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/glibc-stdint.h @@ -0,0 +1,64 @@ +/* Definitions for types on systems using GNU libc or uClibc. + Copyright (C) 2008-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* Systems using musl libc should use this header and make sure + OPTION_MUSL is defined correctly before using the TYPE macros. 
*/ +#ifndef OPTION_MUSL +#define OPTION_MUSL 0 +#endif +#ifndef OPTION_MUSL_P +#define OPTION_MUSL_P(opts) 0 +#endif + +#define SIG_ATOMIC_TYPE "int" + +#define INT8_TYPE "signed char" +#define INT16_TYPE "short int" +#define INT32_TYPE "int" +#define INT64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int") +#define UINT8_TYPE "unsigned char" +#define UINT16_TYPE "short unsigned int" +#define UINT32_TYPE "unsigned int" +#define UINT64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int") + +#define INT_LEAST8_TYPE "signed char" +#define INT_LEAST16_TYPE "short int" +#define INT_LEAST32_TYPE "int" +#define INT_LEAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int") +#define UINT_LEAST8_TYPE "unsigned char" +#define UINT_LEAST16_TYPE "short unsigned int" +#define UINT_LEAST32_TYPE "unsigned int" +#define UINT_LEAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int") + +#define INT_FAST8_TYPE "signed char" +#define INT_FAST16_TYPE (LONG_TYPE_SIZE == 64 && !OPTION_MUSL ? "long int" : "int") +#define INT_FAST32_TYPE (LONG_TYPE_SIZE == 64 && !OPTION_MUSL ? "long int" : "int") +#define INT_FAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "long long int") +#define UINT_FAST8_TYPE "unsigned char" +#define UINT_FAST16_TYPE (LONG_TYPE_SIZE == 64 && !OPTION_MUSL ? "long unsigned int" : "unsigned int") +#define UINT_FAST32_TYPE (LONG_TYPE_SIZE == 64 && !OPTION_MUSL ? "long unsigned int" : "unsigned int") +#define UINT_FAST64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : "long long unsigned int") + +#define INTPTR_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : "int") +#define UINTPTR_TYPE (LONG_TYPE_SIZE == 64 ? 
"long unsigned int" : "unsigned int") diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/gnu-user.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/gnu-user.h new file mode 100644 index 0000000..857c0e0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/gnu-user.h @@ -0,0 +1,158 @@ +/* Definitions for systems using, at least optionally, a GNU + (glibc-based) userspace or other userspace with libc derived from + glibc (e.g. uClibc) or for which similar specs are appropriate. + Copyright (C) 1995-2022 Free Software Foundation, Inc. + Contributed by Eric Youngdale. + Modified for stabs-in-ELF by H.J. Lu (hjl@lucon.org). + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. 
*/ + +#undef ASM_APP_ON +#define ASM_APP_ON "#APP\n" + +#undef ASM_APP_OFF +#define ASM_APP_OFF "#NO_APP\n" + +#if ENABLE_OFFLOADING == 1 +#define CRTOFFLOADBEGIN "%{fopenacc|fopenmp:crtoffloadbegin%O%s}" +#define CRTOFFLOADEND "%{fopenacc|fopenmp:crtoffloadend%O%s}" +#else +#define CRTOFFLOADBEGIN "" +#define CRTOFFLOADEND "" +#endif + +#define GNU_USER_TARGET_CRTI "crti.o%s" +#define GNU_USER_TARGET_CRTN "crtn.o%s" + +/* Provide a STARTFILE_SPEC appropriate for GNU userspace. Here we add + the GNU userspace magical crtbegin.o file (see crtstuff.c) which + provides part of the support for getting C++ file-scope static + object constructed before entering `main'. */ + +#define GNU_USER_TARGET_STARTFILE_SPEC \ + "%{shared:; \ + pg|p|profile:%{static-pie:grcrt1.o%s;:gcrt1.o%s}; \ + static:crt1.o%s; \ + static-pie:rcrt1.o%s; \ + " PIE_SPEC ":Scrt1.o%s; \ + :crt1.o%s} " \ + GNU_USER_TARGET_CRTI " \ + %{static:crtbeginT.o%s; \ + shared|static-pie|" PIE_SPEC ":crtbeginS.o%s; \ + :crtbegin.o%s} \ + %{fvtable-verify=none:%s; \ + fvtable-verify=preinit:vtv_start_preinit.o%s; \ + fvtable-verify=std:vtv_start.o%s} " \ + CRTOFFLOADBEGIN +#undef STARTFILE_SPEC +#define STARTFILE_SPEC GNU_USER_TARGET_STARTFILE_SPEC + +/* Provide a ENDFILE_SPEC appropriate for GNU userspace. Here we tack on + the GNU userspace magical crtend.o file (see crtstuff.c) which + provides part of the support for getting C++ file-scope static + object constructed before entering `main', followed by a normal + GNU userspace "finalizer" file, `crtn.o'. */ + +#define GNU_USER_TARGET_ENDFILE_SPEC \ + "%{!static:%{fvtable-verify=none:%s; \ + fvtable-verify=preinit:vtv_end_preinit.o%s; \ + fvtable-verify=std:vtv_end.o%s}} \ + %{static:crtend.o%s; \ + shared|static-pie|" PIE_SPEC ":crtendS.o%s; \ + :crtend.o%s} " \ + GNU_USER_TARGET_CRTN " " \ + CRTOFFLOADEND +#undef ENDFILE_SPEC +#define ENDFILE_SPEC GNU_USER_TARGET_ENDFILE_SPEC + +/* This is for -profile to use -lc_p instead of -lc. 
*/ +#define GNU_USER_TARGET_CC1_SPEC "%{profile:-p}" +#ifndef CC1_SPEC +#define CC1_SPEC GNU_USER_TARGET_CC1_SPEC +#endif + +/* The GNU C++ standard library requires that these macros be defined. */ +#undef CPLUSPLUS_CPP_SPEC +#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" + +#define GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC \ + "%{shared:-lc} \ + %{!shared:%{profile:-lc_p}%{!profile:-lc}}" + +#define GNU_USER_TARGET_LIB_SPEC \ + "%{pthread:-lpthread} " \ + GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC + +#undef LIB_SPEC +#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC + +#if defined(HAVE_LD_EH_FRAME_HDR) +#define LINK_EH_SPEC "%{!static|static-pie:--eh-frame-hdr} " +#endif + +#define GNU_USER_TARGET_LINK_GCC_C_SEQUENCE_SPEC \ + "%{static|static-pie:--start-group} %G %{!nolibc:%L} \ + %{static|static-pie:--end-group}%{!static:%{!static-pie:%G}}" + +#undef LINK_GCC_C_SEQUENCE_SPEC +#define LINK_GCC_C_SEQUENCE_SPEC GNU_USER_TARGET_LINK_GCC_C_SEQUENCE_SPEC + +/* Use --as-needed -lgcc_s for eh support. */ +#ifdef HAVE_LD_AS_NEEDED +#define USE_LD_AS_NEEDED 1 +#endif + +#define TARGET_POSIX_IO + +#undef TARGET_LIBC_HAS_FUNCTION +#define TARGET_LIBC_HAS_FUNCTION gnu_libc_has_function + +/* Link -lasan early on the command line. For -static-libasan, don't link + it for -shared link, the executable should be compiled with -static-libasan + in that case, and for executable link with --{,no-}whole-archive around + it to force everything into the executable. And similarly for -ltsan, + -lhwasan, and -llsan. 
*/ +#if defined(HAVE_LD_STATIC_DYNAMIC) +#undef LIBASAN_EARLY_SPEC +#define LIBASAN_EARLY_SPEC "%{!shared:libasan_preinit%O%s} " \ + "%{static-libasan:%{!shared:" \ + LD_STATIC_OPTION " --whole-archive -lasan --no-whole-archive " \ + LD_DYNAMIC_OPTION "}}%{!static-libasan:-lasan}" +#undef LIBHWASAN_EARLY_SPEC +#define LIBHWASAN_EARLY_SPEC "%{static-libhwasan:%{!shared:" \ + LD_STATIC_OPTION " --whole-archive -lhwasan --no-whole-archive " \ + LD_DYNAMIC_OPTION "}}%{!static-libhwasan:-lhwasan}" +#undef LIBTSAN_EARLY_SPEC +#define LIBTSAN_EARLY_SPEC "%{!shared:libtsan_preinit%O%s} " \ + "%{static-libtsan:%{!shared:" \ + LD_STATIC_OPTION " --whole-archive -ltsan --no-whole-archive " \ + LD_DYNAMIC_OPTION "}}%{!static-libtsan:-ltsan}" +#undef LIBLSAN_EARLY_SPEC +#define LIBLSAN_EARLY_SPEC "%{!shared:liblsan_preinit%O%s} " \ + "%{static-liblsan:%{!shared:" \ + LD_STATIC_OPTION " --whole-archive -llsan --no-whole-archive " \ + LD_DYNAMIC_OPTION "}}%{!static-liblsan:-llsan}" +#endif + +#undef TARGET_F951_OPTIONS +#define TARGET_F951_OPTIONS "%{!nostdinc:\ + %:fortran-preinclude-file(-fpre-include= math-vector-fortran.h finclude%s/)}" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/att.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/att.h new file mode 100644 index 0000000..2d9475d --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/att.h @@ -0,0 +1,91 @@ +/* Definitions for AT&T assembler syntax for the Intel 80386. + Copyright (C) 1988-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + + +/* Define the syntax of instructions and addresses. */ + +/* Prefix for internally generated assembler labels. */ +#define LPREFIX ".L" + +/* Assembler pseudos to introduce constants of various size. */ + +#define ASM_BYTE "\t.byte\t" +#define ASM_SHORT "\t.value\t" +#define ASM_LONG "\t.long\t" +#define ASM_QUAD "\t.quad\t" /* Should not be used for 32bit compilation. */ + +/* How to output an ASCII string constant. */ + +#undef ASM_OUTPUT_ASCII +#define ASM_OUTPUT_ASCII(FILE, PTR, SIZE) \ +do \ +{ size_t i = 0, limit = (SIZE); \ + while (i < limit) \ + { if (i%10 == 0) { if (i!=0) putc ('\n', (FILE)); \ + fputs (ASM_BYTE, (FILE)); } \ + else putc (',', (FILE)); \ + fprintf ((FILE), "0x%x", ((PTR)[i++] & 0377)) ;} \ + putc ('\n', (FILE)); \ +} while (0) + +/* Output at beginning of assembler file. */ +#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true + +/* This is how to output an assembler line + that says to advance the location counter + to a multiple of 2**LOG bytes. */ + +#define ASM_OUTPUT_ALIGN(FILE,LOG) \ + if ((LOG) != 0) fprintf ((FILE), "\t.align %d\n", 1 << (LOG)) + +/* This is how to output an assembler line + that says to advance the location counter by SIZE bytes. */ + +#undef ASM_OUTPUT_SKIP +#define ASM_OUTPUT_SKIP(FILE,SIZE) \ + fprintf ((FILE), "\t.set .,.+%u\n", (int)(SIZE)) + +/* Can't use ASM_OUTPUT_SKIP in text section; it doesn't leave 0s. */ + +#define ASM_NO_SKIP_IN_TEXT 1 + +/* Define the syntax of labels and symbol definitions/declarations. 
*/ + +/* The prefix to add for compiler private assembler symbols. */ +#undef LOCAL_LABEL_PREFIX +#define LOCAL_LABEL_PREFIX "." + +/* This is how to store into the string BUF + the symbol_ref name of an internal numbered label where + PREFIX is the class of label and NUM is the number within the class. + This is suitable for output with `assemble_name'. */ + +#undef ASM_GENERATE_INTERNAL_LABEL +#define ASM_GENERATE_INTERNAL_LABEL(BUF,PREFIX,NUMBER) \ + sprintf ((BUF), LOCAL_LABEL_PREFIX "%s%ld", (PREFIX), (long)(NUMBER)) + +/* The prefix to add to user-visible assembler symbols. */ + +#undef USER_LABEL_PREFIX +#define USER_LABEL_PREFIX "" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/biarch64.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/biarch64.h new file mode 100644 index 0000000..1a15d7f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/biarch64.h @@ -0,0 +1,29 @@ +/* Make configure files to produce biarch compiler defaulting to 64bit mode. + This file must be included very first, while the OS specific file later + to overwrite otherwise wrong defaults. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + Contributed by Bo Thorsen . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. 
+ +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#define TARGET_64BIT_DEFAULT (OPTION_MASK_ISA_64BIT | OPTION_MASK_ABI_64) +#define TARGET_BI_ARCH 1 diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/gnu-user-common.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/gnu-user-common.h new file mode 100644 index 0000000..23b54c5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/gnu-user-common.h @@ -0,0 +1,72 @@ +/* Common definitions for Intel 386 and AMD x86-64 systems using + GNU userspace. Copyright (C) 2012-2022 Free Software Foundation, Inc. + Contributed by Ilya Enkovich. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* The svr4 ABI for the i386 says that records and unions are returned + in memory. In the 64bit compilation we will turn this flag off in + ix86_option_override_internal, as we never do pcc_struct_return + scheme on this target. */ +#undef DEFAULT_PCC_STRUCT_RETURN +#define DEFAULT_PCC_STRUCT_RETURN 1 + +/* We arrange for the whole %fs segment to map the tls area. 
*/ +#undef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT +#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT MASK_TLS_DIRECT_SEG_REFS + +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + GNU_USER_TARGET_OS_CPP_BUILTINS(); \ + } \ + while (0) + +#undef CPP_SPEC +#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" + +#undef GNU_USER_TARGET_CC1_SPEC +#define GNU_USER_TARGET_CC1_SPEC "%(cc1_cpu) %{profile:-p}" + +#undef CC1_SPEC +#define CC1_SPEC GNU_USER_TARGET_CC1_SPEC + +/* Similar to standard GNU userspace, but adding -ffast-math support. */ +#define GNU_USER_TARGET_MATHFILE_SPEC \ + "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \ + %{mpc32:crtprec32.o%s} \ + %{mpc64:crtprec64.o%s} \ + %{mpc80:crtprec80.o%s}" + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC \ + GNU_USER_TARGET_MATHFILE_SPEC " " \ + GNU_USER_TARGET_ENDFILE_SPEC + +#define TARGET_ASM_FILE_END file_end_indicate_exec_stack + +/* The stack pointer needs to be moved while checking the stack. */ +#define STACK_CHECK_MOVING_SP 1 + +/* Static stack checking is supported by means of probes. */ +#define STACK_CHECK_STATIC_BUILTIN 1 + +/* We only build the -fsplit-stack support in libgcc if the + assembler has full support for the CFI directives. */ +#if HAVE_GAS_CFI_PERSONALITY_DIRECTIVE +#define TARGET_CAN_SPLIT_STACK +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/gnu-user64.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/gnu-user64.h new file mode 100644 index 0000000..abc8cf6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/gnu-user64.h @@ -0,0 +1,97 @@ +/* Definitions for AMD x86-64 using GNU userspace. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + Contributed by Jan Hubicka , based on linux.h. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* Provide a LINK_SPEC. Here we provide support for the special GCC + options -static and -shared, which allow us to link things in one + of these three modes by applying the appropriate combinations of + options at link-time. + + When the -shared link option is used a final link is not being + done. 
*/ + +#if TARGET_64BIT_DEFAULT +#define SPEC_32 "m16|m32" +#if TARGET_BI_ARCH == 2 +#define SPEC_64 "m64" +#define SPEC_X32 "m16|m32|m64:;" +#else +#define SPEC_64 "m16|m32|mx32:;" +#define SPEC_X32 "mx32" +#endif +#else +#define SPEC_32 "m64|mx32:;" +#define SPEC_64 "m64" +#define SPEC_X32 "mx32" +#endif + +#undef ASM_SPEC +#define ASM_SPEC "%{" SPEC_32 ":--32} \ + %{" SPEC_64 ":--64} \ + %{" SPEC_X32 ":--x32} \ + %{msse2avx:%{!mavx:-msse2avx}}" + +#define GNU_USER_TARGET_LINK_SPEC \ + "%{" SPEC_64 ":-m " GNU_USER_LINK_EMULATION64 "} \ + %{" SPEC_32 ":-m " GNU_USER_LINK_EMULATION32 "} \ + %{" SPEC_X32 ":-m " GNU_USER_LINK_EMULATIONX32 "} \ + %{shared:-shared} \ + %{!shared: \ + %{!static: \ + %{!static-pie: \ + %{rdynamic:-export-dynamic} \ + %{" SPEC_32 ":-dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "} \ + %{" SPEC_64 ":-dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \ + %{" SPEC_X32 ":-dynamic-linker " GNU_USER_DYNAMIC_LINKERX32 "}}} \ + %{static:-static} %{static-pie:-static -pie --no-dynamic-linker -z text}}" + +#undef LINK_SPEC +#define LINK_SPEC GNU_USER_TARGET_LINK_SPEC + +#if TARGET_64BIT_DEFAULT +#if TARGET_BI_ARCH == 2 +#define MULTILIB_DEFAULTS { "mx32" } +#else +#define MULTILIB_DEFAULTS { "m64" } +#endif +#else +#define MULTILIB_DEFAULTS { "m32" } +#endif + +#ifdef TARGET_LIBC_PROVIDES_SSP +/* i386 glibc provides __stack_chk_guard in %gs:0x14, + x32 glibc provides it in %fs:0x18. + x86_64 glibc provides it in %fs:0x28. */ +#define TARGET_THREAD_SSP_OFFSET \ + (TARGET_64BIT ? (TARGET_X32 ? 0x18 : 0x28) : 0x14) + +/* i386 glibc provides __private_ss in %gs:0x30. + x32 glibc provides it in %fs:0x40. + x86_64 glibc provides it in %fs:0x70. */ +#define TARGET_THREAD_SPLIT_STACK_OFFSET \ + (TARGET_64BIT ? (TARGET_X32 ? 0x40 : 0x70) : 0x30) +#endif + +#undef WCHAR_TYPE +#define WCHAR_TYPE (TARGET_LP64 ? 
"int" : "long int") diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-isa.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-isa.def new file mode 100644 index 0000000..83659d0 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-isa.def @@ -0,0 +1,111 @@ +/* Definition for processor table alias flags. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +DEF_PTA(3DNOW) +DEF_PTA(3DNOW_A) +DEF_PTA(64BIT) +DEF_PTA(ABM) +DEF_PTA(AES) +DEF_PTA(AVX) +DEF_PTA(BMI) +DEF_PTA(CX16) +DEF_PTA(F16C) +DEF_PTA(FMA) +DEF_PTA(FMA4) +DEF_PTA(FSGSBASE) +DEF_PTA(LWP) +DEF_PTA(LZCNT) +DEF_PTA(MMX) +DEF_PTA(MOVBE) +DEF_PTA(NO_SAHF) +DEF_PTA(PCLMUL) +DEF_PTA(POPCNT) +DEF_PTA(PREFETCH_SSE) +DEF_PTA(RDRND) +DEF_PTA(SSE) +DEF_PTA(SSE2) +DEF_PTA(SSE3) +DEF_PTA(SSE4_1) +DEF_PTA(SSE4_2) +DEF_PTA(SSE4A) +DEF_PTA(SSSE3) +DEF_PTA(TBM) +DEF_PTA(XOP) +DEF_PTA(AVX2) +DEF_PTA(BMI2) +DEF_PTA(RTM) +DEF_PTA(HLE) +DEF_PTA(PRFCHW) +DEF_PTA(RDSEED) +DEF_PTA(ADX) +DEF_PTA(FXSR) +DEF_PTA(XSAVE) +DEF_PTA(XSAVEOPT) +DEF_PTA(AVX512F) +DEF_PTA(AVX512ER) +DEF_PTA(AVX512PF) +DEF_PTA(AVX512CD) +DEF_PTA(NO_TUNE) +DEF_PTA(SHA) +DEF_PTA(PREFETCHWT1) +DEF_PTA(CLFLUSHOPT) +DEF_PTA(XSAVEC) +DEF_PTA(XSAVES) +DEF_PTA(AVX512DQ) +DEF_PTA(AVX512BW) +DEF_PTA(AVX512VL) +DEF_PTA(AVX512IFMA) +DEF_PTA(AVX512VBMI) +DEF_PTA(CLWB) +DEF_PTA(MWAITX) +DEF_PTA(CLZERO) +DEF_PTA(NO_80387) +DEF_PTA(PKU) +DEF_PTA(AVX5124VNNIW) +DEF_PTA(AVX5124FMAPS) +DEF_PTA(AVX512VPOPCNTDQ) +DEF_PTA(SGX) +DEF_PTA(AVX512VNNI) +DEF_PTA(GFNI) +DEF_PTA(VAES) +DEF_PTA(AVX512VBMI2) +DEF_PTA(VPCLMULQDQ) +DEF_PTA(AVX512BITALG) +DEF_PTA(RDPID) +DEF_PTA(PCONFIG) +DEF_PTA(WBNOINVD) +DEF_PTA(AVX512VP2INTERSECT) +DEF_PTA(PTWRITE) +DEF_PTA(AVX512BF16) +DEF_PTA(WAITPKG) +DEF_PTA(MOVDIRI) +DEF_PTA(MOVDIR64B) +DEF_PTA(ENQCMD) +DEF_PTA(CLDEMOTE) +DEF_PTA(SERIALIZE) +DEF_PTA(TSXLDTRK) +DEF_PTA(AMX_TILE) +DEF_PTA(AMX_INT8) +DEF_PTA(AMX_BF16) +DEF_PTA(UINTR) +DEF_PTA(HRESET) +DEF_PTA(KL) +DEF_PTA(WIDEKL) +DEF_PTA(AVXVNNI) +DEF_PTA(AVX512FP16) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-opts.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-opts.h new file mode 100644 index 0000000..8f71e89 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-opts.h @@ -0,0 +1,131 @@ +/* Definitions for option handling for IA-32. 
+ Copyright (C) 1988-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef I386_OPTS_H +#define I386_OPTS_H + +/* Algorithm to expand string function with. */ +enum stringop_alg +{ +#undef DEF_ALG +#define DEF_ALG(alg, name) alg, + +#include "stringop.def" +last_alg + +#undef DEF_ALG +}; + +/* Available call abi. */ +enum calling_abi +{ + SYSV_ABI = 0, + MS_ABI = 1 +}; + +enum fpmath_unit +{ + FPMATH_387 = 1, + FPMATH_SSE = 2 +}; + +enum tls_dialect +{ + TLS_DIALECT_GNU, + TLS_DIALECT_GNU2, + TLS_DIALECT_SUN +}; + +enum cmodel { + CM_32, /* The traditional 32-bit ABI. */ + CM_SMALL, /* Assumes all code and data fits in the low 31 bits. */ + CM_KERNEL, /* Assumes all code and data fits in the high 31 bits. */ + CM_MEDIUM, /* Assumes code fits in the low 31 bits; data unlimited. */ + CM_LARGE, /* No assumptions. */ + CM_SMALL_PIC, /* Assumes code+data+got/plt fits in a 31 bit region. */ + CM_MEDIUM_PIC,/* Assumes code+got/plt fits in a 31 bit region. */ + CM_LARGE_PIC /* No assumptions. */ +}; + +enum pmode { + PMODE_SI, /* Pmode == SImode. */ + PMODE_DI /* Pmode == DImode. 
*/ +}; + +enum ix86_align_data { + ix86_align_data_type_compat, + ix86_align_data_type_abi, + ix86_align_data_type_cacheline +}; + +enum asm_dialect { + ASM_ATT, + ASM_INTEL +}; + +enum ix86_veclibabi { + ix86_veclibabi_type_none, + ix86_veclibabi_type_svml, + ix86_veclibabi_type_acml +}; + +enum stack_protector_guard { + SSP_TLS, /* per-thread canary in TLS block */ + SSP_GLOBAL /* global canary */ +}; + +enum prefer_vector_width { + PVW_NONE, + PVW_AVX128, + PVW_AVX256, + PVW_AVX512 +}; + +/* This is used to mitigate variant #2 of the speculative execution + vulnerabilities on x86 processors identified by CVE-2017-5715, aka + Spectre. They convert indirect branches and function returns to + call and return thunks to avoid speculative execution via indirect + call, jmp and ret. */ +enum indirect_branch { + indirect_branch_unset = 0, + indirect_branch_keep, + indirect_branch_thunk, + indirect_branch_thunk_inline, + indirect_branch_thunk_extern +}; + +enum instrument_return { + instrument_return_none = 0, + instrument_return_call, + instrument_return_nop5 +}; + +enum harden_sls { + harden_sls_none = 0, + harden_sls_return = 1 << 0, + harden_sls_indirect_jmp = 1 << 1, + harden_sls_all = harden_sls_return | harden_sls_indirect_jmp +}; + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-protos.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-protos.h new file mode 100644 index 0000000..3596ce8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386-protos.h @@ -0,0 +1,412 @@ +/* Definitions of target machine for GCC for IA-32. + Copyright (C) 1988-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. 
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* In i386-common.cc. */ +extern bool ix86_handle_option (struct gcc_options *opts, + struct gcc_options *opts_set ATTRIBUTE_UNUSED, + const struct cl_decoded_option *decoded, + location_t loc); + +/* Functions in i386.cc */ +extern bool ix86_target_stack_probe (void); +extern bool ix86_can_use_return_insn_p (void); +extern bool ix86_function_ms_hook_prologue (const_tree fn); +extern void ix86_setup_frame_addresses (void); +extern bool ix86_rip_relative_addr_p (struct ix86_address *parts); + +extern HOST_WIDE_INT ix86_initial_elimination_offset (int, int); +extern void ix86_expand_prologue (void); +extern void ix86_maybe_emit_epilogue_vzeroupper (void); +extern void ix86_expand_epilogue (int); +extern void ix86_expand_split_stack_prologue (void); + +extern void ix86_output_addr_vec_elt (FILE *, int); +extern void ix86_output_addr_diff_elt (FILE *, int, int); + +extern const char *ix86_output_ssemov (rtx_insn *, rtx *); + +extern enum calling_abi ix86_cfun_abi (void); +extern enum calling_abi ix86_function_type_abi (const_tree); + +extern bool ix86_use_pseudo_pic_reg (void); + +extern void ix86_reset_previous_fndecl (void); + +extern bool ix86_using_red_zone (void); + +extern rtx ix86_gen_scratch_sse_rtx (machine_mode); + +extern unsigned int ix86_regmode_natural_size (machine_mode); +extern bool ix86_check_builtin_isa_match (unsigned int fcode); +#ifdef RTX_CODE +extern int standard_80387_constant_p (rtx); +extern const char *standard_80387_constant_opcode (rtx); +extern rtx standard_80387_constant_rtx (int); +extern int standard_sse_constant_p (rtx, machine_mode); +extern const char *standard_sse_constant_opcode 
(rtx_insn *, rtx *); +extern bool ix86_standard_x87sse_constant_load_p (const rtx_insn *, rtx); +extern bool ix86_pre_reload_split (void); +extern bool symbolic_reference_mentioned_p (rtx); +extern bool extended_reg_mentioned_p (rtx); +extern bool x86_extended_QIreg_mentioned_p (rtx_insn *); +extern bool x86_extended_reg_mentioned_p (rtx); +extern bool x86_maybe_negate_const_int (rtx *, machine_mode); +extern machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx); + +extern int avx_vpermilp_parallel (rtx par, machine_mode mode); +extern int avx_vperm2f128_parallel (rtx par, machine_mode mode); + +extern bool ix86_expand_strlen (rtx, rtx, rtx, rtx); +extern bool ix86_expand_set_or_cpymem (rtx, rtx, rtx, rtx, rtx, rtx, + rtx, rtx, rtx, rtx, bool); +extern bool ix86_expand_cmpstrn_or_cmpmem (rtx, rtx, rtx, rtx, rtx, bool); + +extern bool constant_address_p (rtx); +extern bool legitimate_pic_operand_p (rtx); +extern bool legitimate_pic_address_disp_p (rtx); +extern bool ix86_force_load_from_GOT_p (rtx, bool = false); +extern void print_reg (rtx, int, FILE*); +extern void ix86_print_operand (FILE *, rtx, int); + +extern void split_double_mode (machine_mode, rtx[], int, rtx[], rtx[]); + +extern const char *output_set_got (rtx, rtx); +extern const char *output_387_binary_op (rtx_insn *, rtx*); +extern const char *output_387_reg_move (rtx_insn *, rtx*); +extern const char *output_fix_trunc (rtx_insn *, rtx*, bool); +extern const char *output_fp_compare (rtx_insn *, rtx*, bool, bool); +extern const char *output_adjust_stack_and_probe (rtx); +extern const char *output_probe_stack_range (rtx, rtx); + +extern void ix86_output_patchable_area (unsigned int, bool); + +extern void ix86_expand_clear (rtx); +extern void ix86_expand_move (machine_mode, rtx[]); +extern void ix86_expand_vector_move (machine_mode, rtx[]); +extern void ix86_expand_vector_move_misalign (machine_mode, rtx[]); +extern rtx ix86_fixup_binary_operands (enum rtx_code, + machine_mode, rtx[]); +extern void 
ix86_fixup_binary_operands_no_copy (enum rtx_code, + machine_mode, rtx[]); +extern void ix86_expand_binary_operator (enum rtx_code, + machine_mode, rtx[]); +extern void ix86_expand_vector_logical_operator (enum rtx_code, + machine_mode, rtx[]); +extern bool ix86_binary_operator_ok (enum rtx_code, machine_mode, rtx[]); +extern bool ix86_avoid_lea_for_add (rtx_insn *, rtx[]); +extern bool ix86_use_lea_for_mov (rtx_insn *, rtx[]); +extern bool ix86_avoid_lea_for_addr (rtx_insn *, rtx[]); +extern void ix86_split_lea_for_addr (rtx_insn *, rtx[], machine_mode); +extern bool ix86_lea_for_add_ok (rtx_insn *, rtx[]); +extern int ix86_last_zero_store_uid; +extern bool ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high); +extern bool ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn); +extern bool ix86_agi_dependent (rtx_insn *set_insn, rtx_insn *use_insn); +extern void ix86_expand_unary_operator (enum rtx_code, machine_mode, + rtx[]); +extern rtx ix86_build_const_vector (machine_mode, bool, rtx); +extern rtx ix86_build_signbit_mask (machine_mode, bool, bool); +extern void ix86_split_convert_uns_si_sse (rtx[]); +extern void ix86_expand_convert_uns_didf_sse (rtx, rtx); +extern void ix86_expand_convert_uns_sixf_sse (rtx, rtx); +extern void ix86_expand_convert_uns_sidf_sse (rtx, rtx); +extern void ix86_expand_convert_uns_sisf_sse (rtx, rtx); +extern void ix86_expand_convert_sign_didf_sse (rtx, rtx); +extern void ix86_expand_vector_convert_uns_vsivsf (rtx, rtx); +extern rtx ix86_expand_adjust_ufix_to_sfix_si (rtx, rtx *); +extern enum ix86_fpcmp_strategy ix86_fp_comparison_strategy (enum rtx_code); +extern void ix86_expand_fp_absneg_operator (enum rtx_code, machine_mode, + rtx[]); +extern void ix86_split_fp_absneg_operator (enum rtx_code, machine_mode, + rtx[]); +extern void ix86_expand_copysign (rtx []); +extern void ix86_expand_xorsign (rtx []); +extern bool ix86_unary_operator_ok (enum rtx_code, machine_mode, rtx[]); +extern bool ix86_match_ccmode 
(rtx, machine_mode); +extern void ix86_expand_branch (enum rtx_code, rtx, rtx, rtx); +extern void ix86_expand_setcc (rtx, enum rtx_code, rtx, rtx); +extern bool ix86_expand_int_movcc (rtx[]); +extern bool ix86_expand_fp_movcc (rtx[]); +extern bool ix86_expand_fp_vcond (rtx[]); +extern bool ix86_expand_int_vcond (rtx[]); +extern void ix86_expand_vec_perm (rtx[]); +extern bool ix86_expand_mask_vec_cmp (rtx, enum rtx_code, rtx, rtx); +extern bool ix86_expand_int_vec_cmp (rtx[]); +extern bool ix86_expand_fp_vec_cmp (rtx[]); +extern void ix86_expand_sse_movcc (rtx, rtx, rtx, rtx); +extern void ix86_expand_sse_unpack (rtx, rtx, bool, bool); +extern void ix86_expand_fp_spaceship (rtx, rtx, rtx); +extern bool ix86_expand_int_addcc (rtx[]); +extern rtx_insn *ix86_expand_call (rtx, rtx, rtx, rtx, rtx, bool); +extern bool ix86_call_use_plt_p (rtx); +extern void ix86_split_call_vzeroupper (rtx, rtx); +extern void x86_initialize_trampoline (rtx, rtx, rtx); +extern rtx ix86_zero_extend_to_Pmode (rtx); +extern void ix86_split_long_move (rtx[]); +extern void ix86_split_ashl (rtx *, rtx, machine_mode); +extern void ix86_split_ashr (rtx *, rtx, machine_mode); +extern void ix86_split_lshr (rtx *, rtx, machine_mode); +extern void ix86_expand_v1ti_shift (enum rtx_code, rtx[]); +extern void ix86_expand_v1ti_rotate (enum rtx_code, rtx[]); +extern void ix86_expand_v1ti_ashiftrt (rtx[]); +extern rtx ix86_find_base_term (rtx); +extern bool ix86_check_movabs (rtx, int); +extern bool ix86_check_no_addr_space (rtx); +extern void ix86_split_idivmod (machine_mode, rtx[], bool); +extern bool ix86_hardreg_mov_ok (rtx, rtx); + +extern rtx assign_386_stack_local (machine_mode, enum ix86_stack_slot); +extern int ix86_attr_length_immediate_default (rtx_insn *, bool); +extern int ix86_attr_length_address_default (rtx_insn *); +extern int ix86_attr_length_vex_default (rtx_insn *, bool, bool); + +extern rtx ix86_libcall_value (machine_mode); +extern bool ix86_function_arg_regno_p (int); +extern void 
ix86_asm_output_function_label (FILE *, const char *, tree); +extern void ix86_call_abi_override (const_tree); +extern int ix86_reg_parm_stack_space (const_tree); + +extern bool ix86_libc_has_function (enum function_class fn_class); + +extern void x86_order_regs_for_local_alloc (void); +extern void x86_function_profiler (FILE *, int); +extern void x86_emit_floatuns (rtx [2]); +extern void ix86_emit_fp_unordered_jump (rtx); + +extern void ix86_emit_i387_sinh (rtx, rtx); +extern void ix86_emit_i387_cosh (rtx, rtx); +extern void ix86_emit_i387_tanh (rtx, rtx); +extern void ix86_emit_i387_asinh (rtx, rtx); +extern void ix86_emit_i387_acosh (rtx, rtx); +extern void ix86_emit_i387_atanh (rtx, rtx); +extern void ix86_emit_i387_log1p (rtx, rtx); +extern void ix86_emit_i387_round (rtx, rtx); +extern void ix86_emit_swdivsf (rtx, rtx, rtx, machine_mode); +extern void ix86_emit_swsqrtsf (rtx, rtx, machine_mode, bool); + +extern enum rtx_code ix86_reverse_condition (enum rtx_code, machine_mode); + +extern void ix86_expand_lround (rtx, rtx); +extern void ix86_expand_lfloorceil (rtx, rtx, bool); +extern void ix86_expand_rint (rtx, rtx); +extern void ix86_expand_floorceil (rtx, rtx, bool); +extern void ix86_expand_floorceildf_32 (rtx, rtx, bool); +extern void ix86_expand_trunc (rtx, rtx); +extern void ix86_expand_truncdf_32 (rtx, rtx); +extern void ix86_expand_round (rtx, rtx); +extern void ix86_expand_rounddf_32 (rtx, rtx); +extern void ix86_expand_round_sse4 (rtx, rtx); + +extern void ix86_expand_vecop_qihi (enum rtx_code, rtx, rtx, rtx); +extern rtx ix86_split_stack_guard (void); + +extern void ix86_move_vector_high_sse_to_mmx (rtx); +extern void ix86_split_mmx_pack (rtx[], enum rtx_code); +extern void ix86_split_mmx_punpck (rtx[], bool); +extern void ix86_expand_avx_vzeroupper (void); +extern void ix86_expand_atomic_fetch_op_loop (rtx, rtx, rtx, enum rtx_code, + bool, bool); +extern void ix86_expand_cmpxchg_loop (rtx *, rtx, rtx, rtx, rtx, rtx, + bool, rtx_code_label *); + 
+#ifdef TREE_CODE +extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int); +#endif /* TREE_CODE */ + +#endif /* RTX_CODE */ + +#ifdef TREE_CODE +extern int ix86_data_alignment (tree, unsigned int, bool); +extern unsigned int ix86_local_alignment (tree, machine_mode, + unsigned int, bool = false); +extern unsigned int ix86_minimum_alignment (tree, machine_mode, + unsigned int); +extern tree ix86_handle_shared_attribute (tree *, tree, tree, int, bool *); +extern tree ix86_handle_selectany_attribute (tree *, tree, tree, int, bool *); +extern int x86_field_alignment (tree, int); +extern tree ix86_valid_target_attribute_tree (tree, tree, + struct gcc_options *, + struct gcc_options *, bool); +extern unsigned int ix86_get_callcvt (const_tree); + +#endif + +extern rtx ix86_tls_module_base (void); +extern bool ix86_gpr_tls_address_pattern_p (rtx); +extern bool ix86_tls_address_pattern_p (rtx); +extern rtx ix86_rewrite_tls_address (rtx); + +extern void ix86_expand_vector_init (bool, rtx, rtx); +extern void ix86_expand_vector_set (bool, rtx, rtx, int); +extern void ix86_expand_vector_set_var (rtx, rtx, rtx); +extern void ix86_expand_vector_extract (bool, rtx, rtx, int); +extern void ix86_expand_reduc (rtx (*)(rtx, rtx, rtx), rtx, rtx); + +extern void ix86_expand_vec_extract_even_odd (rtx, rtx, rtx, unsigned); +extern bool ix86_expand_pextr (rtx *); +extern bool ix86_expand_pinsr (rtx *); +extern void ix86_expand_mul_widen_evenodd (rtx, rtx, rtx, bool, bool); +extern void ix86_expand_mul_widen_hilo (rtx, rtx, rtx, bool, bool); +extern void ix86_expand_sse2_mulv4si3 (rtx, rtx, rtx); +extern void ix86_expand_sse2_mulvxdi3 (rtx, rtx, rtx); +extern void ix86_expand_sse2_abs (rtx, rtx); +extern bool ix86_expand_vector_init_duplicate (bool, machine_mode, rtx, + rtx); +extern bool ix86_extract_perm_from_pool_constant (int*, rtx); + +/* In i386-c.cc */ +extern void ix86_target_macros (void); +extern void ix86_register_pragmas (void); + +/* In i386-d.cc */ +extern 
void ix86_d_target_versions (void); +extern void ix86_d_register_target_info (void); +extern bool ix86_d_has_stdcall_convention (unsigned int *, unsigned int *); + +/* In winnt.cc */ +extern void i386_pe_unique_section (tree, int); +extern void i386_pe_declare_function_type (FILE *, const char *, int); +extern void i386_pe_record_external_function (tree, const char *); +extern void i386_pe_maybe_record_exported_symbol (tree, const char *, int); +extern void i386_pe_encode_section_info (tree, rtx, int); +extern bool i386_pe_binds_local_p (const_tree); +extern const char *i386_pe_strip_name_encoding_full (const char *); +extern bool i386_pe_valid_dllimport_attribute_p (const_tree); +extern unsigned int i386_pe_section_type_flags (tree, const char *, int); +extern void i386_pe_asm_named_section (const char *, unsigned int, tree); +extern void i386_pe_asm_output_aligned_decl_common (FILE *, tree, + const char *, + HOST_WIDE_INT, + HOST_WIDE_INT); +extern void i386_pe_file_end (void); +extern void i386_pe_asm_lto_start (void); +extern void i386_pe_asm_lto_end (void); +extern void i386_pe_start_function (FILE *, const char *, tree); +extern void i386_pe_end_function (FILE *, const char *, tree); +extern void i386_pe_end_cold_function (FILE *, const char *, tree); +extern void i386_pe_assemble_visibility (tree, int); +extern tree i386_pe_mangle_decl_assembler_name (tree, tree); +extern tree i386_pe_mangle_assembler_name (const char *); +extern void i386_pe_record_stub (const char *); + +extern void i386_pe_seh_init (FILE *); +extern void i386_pe_seh_end_prologue (FILE *); +extern void i386_pe_seh_cold_init (FILE *, const char *); +extern void i386_pe_seh_unwind_emit (FILE *, rtx_insn *); +extern void i386_pe_seh_emit_except_personality (rtx); +extern void i386_pe_seh_init_sections (void); + +/* In winnt-cxx.cc and winnt-stubs.cc */ +extern void i386_pe_adjust_class_at_definition (tree); +extern bool i386_pe_type_dllimport_p (tree); +extern bool i386_pe_type_dllexport_p 
(tree); + +extern int i386_pe_reloc_rw_mask (void); + +extern char internal_label_prefix[16]; +extern int internal_label_prefix_len; + +extern bool ix86_epilogue_uses (int); + +struct ix86_address +{ + rtx base, index, disp; + HOST_WIDE_INT scale; + addr_space_t seg; +}; + +extern bool ix86_decompose_address (rtx, struct ix86_address *); +extern int memory_address_length (rtx, bool); +extern void x86_output_aligned_bss (FILE *, tree, const char *, + unsigned HOST_WIDE_INT, unsigned); +extern void x86_elf_aligned_decl_common (FILE *, tree, const char *, + unsigned HOST_WIDE_INT, unsigned); + +#ifdef RTX_CODE +extern void ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *, + enum rtx_code *, enum rtx_code *); +extern enum rtx_code ix86_fp_compare_code_to_integer (enum rtx_code); +#endif +extern int asm_preferred_eh_data_format (int, int); + +#ifdef HAVE_ATTR_cpu +extern enum attr_cpu ix86_schedule; +#endif + +extern const char * ix86_output_call_insn (rtx_insn *insn, rtx call_op); +extern const char * ix86_output_indirect_jmp (rtx call_op); +extern const char * ix86_output_function_return (bool long_p); +extern const char * ix86_output_indirect_function_return (rtx ret_op); +extern void ix86_split_simple_return_pop_internal (rtx); +extern bool ix86_operands_ok_for_move_multiple (rtx *operands, bool load, + machine_mode mode); +extern int ix86_min_insn_size (rtx_insn *); + +extern int ix86_issue_rate (void); +extern int ix86_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, + int cost, unsigned int); +extern int ia32_multipass_dfa_lookahead (void); +extern bool ix86_macro_fusion_p (void); +extern bool ix86_macro_fusion_pair_p (rtx_insn *condgen, rtx_insn *condjmp); + +extern bool ix86_bd_has_dispatch (rtx_insn *insn, int action); +extern void ix86_bd_do_dispatch (rtx_insn *insn, int mode); + +extern void ix86_core2i7_init_hooks (void); + +extern int ix86_atom_sched_reorder (FILE *, int, rtx_insn **, int *, int); + +extern poly_int64 
ix86_push_rounding (poly_int64); + +#ifdef RTX_CODE +/* Target data for multipass lookahead scheduling. + Currently used for Core 2/i7 tuning. */ +struct ix86_first_cycle_multipass_data_ +{ + /* The length (in bytes) of ifetch block in this solution. */ + int ifetch_block_len; + /* Number of instructions in ifetch block in this solution. */ + int ifetch_block_n_insns; + /* Bitmap to remember changes to ready_try for backtracking. */ + sbitmap ready_try_change; + /* Size of the bitmap. */ + int ready_try_change_size; +}; +# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T \ + struct ix86_first_cycle_multipass_data_ +#endif /* RTX_CODE */ + +const addr_space_t ADDR_SPACE_SEG_FS = 1; +const addr_space_t ADDR_SPACE_SEG_GS = 2; + +namespace gcc { class context; } +class rtl_opt_pass; + +extern rtl_opt_pass *make_pass_insert_vzeroupper (gcc::context *); +extern rtl_opt_pass *make_pass_stv (gcc::context *); +extern rtl_opt_pass *make_pass_insert_endbr_and_patchable_area + (gcc::context *); +extern rtl_opt_pass *make_pass_remove_partial_avx_dependency + (gcc::context *); + +extern bool ix86_has_no_direct_extern_access; + +/* In i386-expand.cc. */ +bool ix86_check_builtin_isa_match (unsigned int, HOST_WIDE_INT*, + HOST_WIDE_INT*); diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386.h new file mode 100644 index 0000000..363082b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/i386.h @@ -0,0 +1,2866 @@ +/* Definitions of target machine for GCC for IA-32. + Copyright (C) 1988-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. 
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* The purpose of this file is to define the characteristics of the i386, + independent of assembler syntax or operating system. + + Three other files build on this one to describe a specific assembler syntax: + bsd386.h, att386.h, and sun386.h. + + The actual tm.h file for a particular system should include + this file, and then the file for the appropriate assembler syntax. + + Many macros that specify assembler syntax are omitted entirely from + this file because they really belong in the files for particular + assemblers. These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR, + ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many + that start with ASM_ or end in ASM_OP. */ + +/* Redefines for option macros. */ + +#define TARGET_CMPXCHG16B TARGET_CX16 +#define TARGET_CMPXCHG16B_P(x) TARGET_CX16_P(x) + +#define TARGET_LP64 TARGET_ABI_64 +#define TARGET_LP64_P(x) TARGET_ABI_64_P(x) +#define TARGET_X32 TARGET_ABI_X32 +#define TARGET_X32_P(x) TARGET_ABI_X32_P(x) +#define TARGET_16BIT TARGET_CODE16 +#define TARGET_16BIT_P(x) TARGET_CODE16_P(x) + +#define TARGET_MMX_WITH_SSE (TARGET_64BIT && TARGET_SSE2) + +#include "config/vxworks-dummy.h" + +#include "config/i386/i386-opts.h" + +#define MAX_STRINGOP_ALGS 4 + +/* Specify what algorithm to use for stringops on known size. + When size is unknown, the UNKNOWN_SIZE alg is used. 
When size is + known at compile time or estimated via feedback, the SIZE array + is walked in order until MAX is greater then the estimate (or -1 + means infinity). Corresponding ALG is used then. + When NOALIGN is true the code guaranting the alignment of the memory + block is skipped. + + For example initializer: + {{256, loop}, {-1, rep_prefix_4_byte}} + will use loop for blocks smaller or equal to 256 bytes, rep prefix will + be used otherwise. */ +struct stringop_algs +{ + const enum stringop_alg unknown_size; + const struct stringop_strategy { + /* Several older compilers delete the default constructor because of the + const entries (see PR100246). Manually specifying a CTOR works around + this issue. Since this header is used by code compiled with the C + compiler we must guard the addition. */ +#ifdef __cplusplus + constexpr + stringop_strategy (int _max = -1, enum stringop_alg _alg = libcall, + int _noalign = false) + : max (_max), alg (_alg), noalign (_noalign) {} +#endif + const int max; + const enum stringop_alg alg; + int noalign; + } size [MAX_STRINGOP_ALGS]; +}; + +/* Analog of COSTS_N_INSNS when optimizing for size. */ +#ifndef COSTS_N_BYTES +#define COSTS_N_BYTES(N) ((N) * 2) +#endif + +/* Define the specific costs for a given cpu. NB: hard_register is used + by TARGET_REGISTER_MOVE_COST and TARGET_MEMORY_MOVE_COST to compute + hard register move costs by register allocator. Relative costs of + pseudo register load and store versus pseudo register moves in RTL + expressions for TARGET_RTX_COSTS can be different from relative + costs of hard registers to get the most efficient operations with + pseudo registers. */ + +struct processor_costs { + /* Costs used by register allocator. integer->integer register move + cost is 2. */ + struct + { + const int movzbl_load; /* cost of loading using movzbl */ + const int int_load[3]; /* cost of loading integer registers + in QImode, HImode and SImode relative + to reg-reg move (2). 
*/ + const int int_store[3]; /* cost of storing integer register + in QImode, HImode and SImode */ + const int fp_move; /* cost of reg,reg fld/fst */ + const int fp_load[3]; /* cost of loading FP register + in SFmode, DFmode and XFmode */ + const int fp_store[3]; /* cost of storing FP register + in SFmode, DFmode and XFmode */ + const int mmx_move; /* cost of moving MMX register. */ + const int mmx_load[2]; /* cost of loading MMX register + in SImode and DImode */ + const int mmx_store[2]; /* cost of storing MMX register + in SImode and DImode */ + const int xmm_move; /* cost of moving XMM register. */ + const int ymm_move; /* cost of moving XMM register. */ + const int zmm_move; /* cost of moving XMM register. */ + const int sse_load[5]; /* cost of loading SSE register + in 32bit, 64bit, 128bit, 256bit and 512bit */ + const int sse_store[5]; /* cost of storing SSE register + in SImode, DImode and TImode. */ + const int sse_to_integer; /* cost of moving SSE register to integer. */ + const int integer_to_sse; /* cost of moving integer register to SSE. */ + const int mask_to_integer; /* cost of moving mask register to integer. */ + const int integer_to_mask; /* cost of moving integer register to mask. */ + const int mask_load[3]; /* cost of loading mask registers + in QImode, HImode and SImode. */ + const int mask_store[3]; /* cost of storing mask register + in QImode, HImode and SImode. */ + const int mask_move; /* cost of moving mask register. */ + } hard_register; + + const int add; /* cost of an add instruction */ + const int lea; /* cost of a lea instruction */ + const int shift_var; /* variable shift costs */ + const int shift_const; /* constant shift costs */ + const int mult_init[5]; /* cost of starting a multiply + in QImode, HImode, SImode, DImode, TImode*/ + const int mult_bit; /* cost of multiply per each bit set */ + const int divide[5]; /* cost of a divide/mod + in QImode, HImode, SImode, DImode, TImode*/ + int movsx; /* The cost of movsx operation. 
*/ + int movzx; /* The cost of movzx operation. */ + const int large_insn; /* insns larger than this cost more */ + const int move_ratio; /* The threshold of number of scalar + memory-to-memory move insns. */ + const int clear_ratio; /* The threshold of number of scalar + memory clearing insns. */ + const int int_load[3]; /* cost of loading integer registers + in QImode, HImode and SImode relative + to reg-reg move (2). */ + const int int_store[3]; /* cost of storing integer register + in QImode, HImode and SImode */ + const int sse_load[5]; /* cost of loading SSE register + in 32bit, 64bit, 128bit, 256bit and 512bit */ + const int sse_store[5]; /* cost of storing SSE register + in 32bit, 64bit, 128bit, 256bit and 512bit */ + const int sse_unaligned_load[5];/* cost of unaligned load. */ + const int sse_unaligned_store[5];/* cost of unaligned store. */ + const int xmm_move, ymm_move, /* cost of moving XMM and YMM register. */ + zmm_move; + const int sse_to_integer; /* cost of moving SSE register to integer. */ + const int gather_static, gather_per_elt; /* Cost of gather load is computed + as static + per_item * nelts. */ + const int scatter_static, scatter_per_elt; /* Cost of gather store is + computed as static + per_item * nelts. */ + const int l1_cache_size; /* size of l1 cache, in kilobytes. */ + const int l2_cache_size; /* size of l2 cache, in kilobytes. */ + const int prefetch_block; /* bytes moved to cache for prefetch. */ + const int simultaneous_prefetches; /* number of parallel prefetch + operations. */ + const int branch_cost; /* Default value for BRANCH_COST. */ + const int fadd; /* cost of FADD and FSUB instructions. */ + const int fmul; /* cost of FMUL instruction. */ + const int fdiv; /* cost of FDIV instruction. */ + const int fabs; /* cost of FABS instruction. */ + const int fchs; /* cost of FCHS instruction. */ + const int fsqrt; /* cost of FSQRT instruction. */ + /* Specify what algorithm + to use for stringops on unknown size. 
*/ + const int sse_op; /* cost of cheap SSE instruction. */ + const int addss; /* cost of ADDSS/SD SUBSS/SD instructions. */ + const int mulss; /* cost of MULSS instructions. */ + const int mulsd; /* cost of MULSD instructions. */ + const int fmass; /* cost of FMASS instructions. */ + const int fmasd; /* cost of FMASD instructions. */ + const int divss; /* cost of DIVSS instructions. */ + const int divsd; /* cost of DIVSD instructions. */ + const int sqrtss; /* cost of SQRTSS instructions. */ + const int sqrtsd; /* cost of SQRTSD instructions. */ + const int reassoc_int, reassoc_fp, reassoc_vec_int, reassoc_vec_fp; + /* Specify reassociation width for integer, + fp, vector integer and vector fp + operations. Generally should correspond + to number of instructions executed in + parallel. See also + ix86_reassociation_width. */ + struct stringop_algs *memcpy, *memset; + const int cond_taken_branch_cost; /* Cost of taken branch for vectorizer + cost model. */ + const int cond_not_taken_branch_cost;/* Cost of not taken branch for + vectorizer cost model. */ + + /* The "0:0:8" label alignment specified for some processors generates + secondary 8-byte alignment only for those label/jump/loop targets + which have primary alignment. */ + const char *const align_loop; /* Loop alignment. */ + const char *const align_jump; /* Jump alignment. */ + const char *const align_label; /* Label alignment. */ + const char *const align_func; /* Function alignment. */ +}; + +extern const struct processor_costs *ix86_cost; +extern const struct processor_costs ix86_size_cost; + +#define ix86_cur_cost() \ + (optimize_insn_for_size_p () ? &ix86_size_cost: ix86_cost) + +/* Macros used in the machine description to test the flags. */ + +/* configure can arrange to change it. */ + +#ifndef TARGET_CPU_DEFAULT +#define TARGET_CPU_DEFAULT PROCESSOR_GENERIC +#endif + +#ifndef TARGET_FPMATH_DEFAULT +#define TARGET_FPMATH_DEFAULT \ + (TARGET_64BIT && TARGET_SSE ? 
FPMATH_SSE : FPMATH_387) +#endif + +#ifndef TARGET_FPMATH_DEFAULT_P +#define TARGET_FPMATH_DEFAULT_P(x) \ + (TARGET_64BIT_P(x) && TARGET_SSE_P(x) ? FPMATH_SSE : FPMATH_387) +#endif + +/* If the i387 is disabled or -miamcu is used , then do not return + values in it. */ +#define TARGET_FLOAT_RETURNS_IN_80387 \ + (TARGET_FLOAT_RETURNS && TARGET_80387 && !TARGET_IAMCU) +#define TARGET_FLOAT_RETURNS_IN_80387_P(x) \ + (TARGET_FLOAT_RETURNS_P(x) && TARGET_80387_P(x) && !TARGET_IAMCU_P(x)) + +/* 64bit Sledgehammer mode. For libgcc2 we make sure this is a + compile-time constant. */ +#ifdef IN_LIBGCC2 +#undef TARGET_64BIT +#ifdef __x86_64__ +#define TARGET_64BIT 1 +#else +#define TARGET_64BIT 0 +#endif +#else +#ifndef TARGET_BI_ARCH +#undef TARGET_64BIT +#undef TARGET_64BIT_P +#if TARGET_64BIT_DEFAULT +#define TARGET_64BIT 1 +#define TARGET_64BIT_P(x) 1 +#else +#define TARGET_64BIT 0 +#define TARGET_64BIT_P(x) 0 +#endif +#endif +#endif + +#define HAS_LONG_COND_BRANCH 1 +#define HAS_LONG_UNCOND_BRANCH 1 + +#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU) + +/* Feature tests against the various tunings. 
*/ +enum ix86_tune_indices { +#undef DEF_TUNE +#define DEF_TUNE(tune, name, selector) tune, +#include "x86-tune.def" +#undef DEF_TUNE +X86_TUNE_LAST +}; + +extern unsigned char ix86_tune_features[X86_TUNE_LAST]; + +#define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE] +#define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY] +#define TARGET_ZERO_EXTEND_WITH_AND \ + ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND] +#define TARGET_UNROLL_STRLEN ix86_tune_features[X86_TUNE_UNROLL_STRLEN] +#define TARGET_BRANCH_PREDICTION_HINTS \ + ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS] +#define TARGET_DOUBLE_WITH_ADD ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD] +#define TARGET_USE_SAHF ix86_tune_features[X86_TUNE_USE_SAHF] +#define TARGET_MOVX ix86_tune_features[X86_TUNE_MOVX] +#define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL] +#define TARGET_PARTIAL_FLAG_REG_STALL \ + ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL] +#define TARGET_LCP_STALL \ + ix86_tune_features[X86_TUNE_LCP_STALL] +#define TARGET_USE_HIMODE_FIOP ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP] +#define TARGET_USE_SIMODE_FIOP ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP] +#define TARGET_USE_MOV0 ix86_tune_features[X86_TUNE_USE_MOV0] +#define TARGET_USE_CLTD ix86_tune_features[X86_TUNE_USE_CLTD] +#define TARGET_USE_XCHGB ix86_tune_features[X86_TUNE_USE_XCHGB] +#define TARGET_SPLIT_LONG_MOVES ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES] +#define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE] +#define TARGET_READ_MODIFY ix86_tune_features[X86_TUNE_READ_MODIFY] +#define TARGET_PROMOTE_QImode ix86_tune_features[X86_TUNE_PROMOTE_QIMODE] +#define TARGET_FAST_PREFIX ix86_tune_features[X86_TUNE_FAST_PREFIX] +#define TARGET_SINGLE_STRINGOP ix86_tune_features[X86_TUNE_SINGLE_STRINGOP] +#define TARGET_PREFER_KNOWN_REP_MOVSB_STOSB \ + ix86_tune_features[X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB] +#define 
TARGET_MISALIGNED_MOVE_STRING_PRO_EPILOGUES \ + ix86_tune_features[X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES] +#define TARGET_QIMODE_MATH ix86_tune_features[X86_TUNE_QIMODE_MATH] +#define TARGET_HIMODE_MATH ix86_tune_features[X86_TUNE_HIMODE_MATH] +#define TARGET_PROMOTE_QI_REGS ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS] +#define TARGET_PROMOTE_HI_REGS ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS] +#define TARGET_SINGLE_POP ix86_tune_features[X86_TUNE_SINGLE_POP] +#define TARGET_DOUBLE_POP ix86_tune_features[X86_TUNE_DOUBLE_POP] +#define TARGET_SINGLE_PUSH ix86_tune_features[X86_TUNE_SINGLE_PUSH] +#define TARGET_DOUBLE_PUSH ix86_tune_features[X86_TUNE_DOUBLE_PUSH] +#define TARGET_INTEGER_DFMODE_MOVES \ + ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES] +#define TARGET_PARTIAL_REG_DEPENDENCY \ + ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY] +#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \ + ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY] +#define TARGET_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY \ + ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY] +#define TARGET_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY \ + ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY] +#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \ + ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL] +#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \ + ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL] +#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \ + ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL] +#define TARGET_SSE_SPLIT_REGS ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS] +#define TARGET_SSE_TYPELESS_STORES \ + ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES] +#define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR] +#define TARGET_MEMORY_MISMATCH_STALL \ + ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL] +#define TARGET_PROLOGUE_USING_MOVE \ + ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE] +#define TARGET_EPILOGUE_USING_MOVE \ 
+ ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE] +#define TARGET_SHIFT1 ix86_tune_features[X86_TUNE_SHIFT1] +#define TARGET_USE_FFREEP ix86_tune_features[X86_TUNE_USE_FFREEP] +#define TARGET_INTER_UNIT_MOVES_TO_VEC \ + ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_TO_VEC] +#define TARGET_INTER_UNIT_MOVES_FROM_VEC \ + ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_FROM_VEC] +#define TARGET_INTER_UNIT_CONVERSIONS \ + ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS] +#define TARGET_FOUR_JUMP_LIMIT ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT] +#define TARGET_SCHEDULE ix86_tune_features[X86_TUNE_SCHEDULE] +#define TARGET_USE_BT ix86_tune_features[X86_TUNE_USE_BT] +#define TARGET_USE_INCDEC ix86_tune_features[X86_TUNE_USE_INCDEC] +#define TARGET_PAD_RETURNS ix86_tune_features[X86_TUNE_PAD_RETURNS] +#define TARGET_PAD_SHORT_FUNCTION \ + ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION] +#define TARGET_EXT_80387_CONSTANTS \ + ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS] +#define TARGET_AVOID_VECTOR_DECODE \ + ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE] +#define TARGET_TUNE_PROMOTE_HIMODE_IMUL \ + ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL] +#define TARGET_SLOW_IMUL_IMM32_MEM \ + ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM] +#define TARGET_SLOW_IMUL_IMM8 ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8] +#define TARGET_MOVE_M1_VIA_OR ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR] +#define TARGET_NOT_UNPAIRABLE ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE] +#define TARGET_NOT_VECTORMODE ix86_tune_features[X86_TUNE_NOT_VECTORMODE] +#define TARGET_USE_VECTOR_FP_CONVERTS \ + ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS] +#define TARGET_USE_VECTOR_CONVERTS \ + ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS] +#define TARGET_SLOW_PSHUFB \ + ix86_tune_features[X86_TUNE_SLOW_PSHUFB] +#define TARGET_AVOID_4BYTE_PREFIXES \ + ix86_tune_features[X86_TUNE_AVOID_4BYTE_PREFIXES] +#define TARGET_USE_GATHER_2PARTS \ + ix86_tune_features[X86_TUNE_USE_GATHER_2PARTS] +#define 
TARGET_USE_GATHER_4PARTS \ + ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS] +#define TARGET_USE_GATHER \ + ix86_tune_features[X86_TUNE_USE_GATHER] +#define TARGET_FUSE_CMP_AND_BRANCH_32 \ + ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32] +#define TARGET_FUSE_CMP_AND_BRANCH_64 \ + ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64] +#define TARGET_FUSE_CMP_AND_BRANCH \ + (TARGET_64BIT ? TARGET_FUSE_CMP_AND_BRANCH_64 \ + : TARGET_FUSE_CMP_AND_BRANCH_32) +#define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \ + ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS] +#define TARGET_FUSE_ALU_AND_BRANCH \ + ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH] +#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU] +#define TARGET_AVOID_LEA_FOR_ADDR \ + ix86_tune_features[X86_TUNE_AVOID_LEA_FOR_ADDR] +#define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \ + ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL] +#define TARGET_AVX256_SPLIT_REGS \ + ix86_tune_features[X86_TUNE_AVX256_SPLIT_REGS] +#define TARGET_GENERAL_REGS_SSE_SPILL \ + ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL] +#define TARGET_AVOID_MEM_OPND_FOR_CMOVE \ + ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE] +#define TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS \ + ix86_tune_features[X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS] +#define TARGET_ADJUST_UNROLL \ + ix86_tune_features[X86_TUNE_ADJUST_UNROLL] +#define TARGET_AVOID_FALSE_DEP_FOR_BMI \ + ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI] +#define TARGET_ONE_IF_CONV_INSN \ + ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN] +#define TARGET_AVOID_MFENCE ix86_tune_features[X86_TUNE_AVOID_MFENCE] +#define TARGET_EMIT_VZEROUPPER \ + ix86_tune_features[X86_TUNE_EMIT_VZEROUPPER] +#define TARGET_EXPAND_ABS \ + ix86_tune_features[X86_TUNE_EXPAND_ABS] +#define TARGET_V2DF_REDUCTION_PREFER_HADDPD \ + ix86_tune_features[X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD] +#define TARGET_DEST_FALSE_DEP_FOR_GLC \ + ix86_tune_features[X86_TUNE_DEST_FALSE_DEP_FOR_GLC] + +/* 
Feature tests against the various architecture variations. */ +enum ix86_arch_indices { + X86_ARCH_CMOV, + X86_ARCH_CMPXCHG, + X86_ARCH_CMPXCHG8B, + X86_ARCH_XADD, + X86_ARCH_BSWAP, + + X86_ARCH_LAST +}; + +extern unsigned char ix86_arch_features[X86_ARCH_LAST]; + +#define TARGET_CMOV ix86_arch_features[X86_ARCH_CMOV] +#define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG] +#define TARGET_CMPXCHG8B ix86_arch_features[X86_ARCH_CMPXCHG8B] +#define TARGET_XADD ix86_arch_features[X86_ARCH_XADD] +#define TARGET_BSWAP ix86_arch_features[X86_ARCH_BSWAP] + +/* For sane SSE instruction set generation we need fcomi instruction. + It is safe to enable all CMOVE instructions. Also, RDRAND intrinsic + expands to a sequence that includes conditional move. */ +#define TARGET_CMOVE (TARGET_CMOV || TARGET_SSE || TARGET_RDRND) + +#define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387) + +extern unsigned char ix86_prefetch_sse; +#define TARGET_PREFETCH_SSE ix86_prefetch_sse + +#define ASSEMBLER_DIALECT (ix86_asm_dialect) + +#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0) +#define TARGET_MIX_SSE_I387 \ + ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387)) + +#define TARGET_HARD_SF_REGS (TARGET_80387 || TARGET_MMX || TARGET_SSE) +#define TARGET_HARD_DF_REGS (TARGET_80387 || TARGET_SSE) +#define TARGET_HARD_XF_REGS (TARGET_80387) + +#define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU) +#define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2) +#define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS) +#define TARGET_SUN_TLS 0 + +#ifndef TARGET_64BIT_DEFAULT +#define TARGET_64BIT_DEFAULT 0 +#endif +#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT +#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0 +#endif + +#define TARGET_SSP_GLOBAL_GUARD (ix86_stack_protector_guard == SSP_GLOBAL) +#define TARGET_SSP_TLS_GUARD (ix86_stack_protector_guard == SSP_TLS) + +/* Fence to use after loop using storent. 
*/ + +extern GTY(()) tree x86_mfence; +#define FENCE_FOLLOWING_MOVNT x86_mfence + +/* Once GDB has been enhanced to deal with functions without frame + pointers, we can change this to allow for elimination of + the frame pointer in leaf functions. */ +#define TARGET_DEFAULT 0 + +/* Extra bits to force. */ +#define TARGET_SUBTARGET_DEFAULT 0 +#define TARGET_SUBTARGET_ISA_DEFAULT 0 + +/* Extra bits to force on w/ 32-bit mode. */ +#define TARGET_SUBTARGET32_DEFAULT 0 +#define TARGET_SUBTARGET32_ISA_DEFAULT 0 + +/* Extra bits to force on w/ 64-bit mode. */ +#define TARGET_SUBTARGET64_DEFAULT 0 +/* Enable MMX, SSE and SSE2 by default. */ +#define TARGET_SUBTARGET64_ISA_DEFAULT \ + (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2) + +/* Replace MACH-O, ifdefs by in-line tests, where possible. + (a) Macros defined in config/i386/darwin.h */ +#define TARGET_MACHO 0 +#define TARGET_MACHO_SYMBOL_STUBS 0 +#define MACHOPIC_ATT_STUB 0 +/* (b) Macros defined in config/darwin.h */ +#define MACHO_DYNAMIC_NO_PIC_P 0 +#define MACHOPIC_INDIRECT 0 +#define MACHOPIC_PURE 0 + +/* For the RDOS */ +#define TARGET_RDOS 0 + +/* For the Windows 64-bit ABI. */ +#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI) + +/* For the Windows 32-bit ABI. */ +#define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI) + +/* This is re-defined by cygming.h. */ +#define TARGET_SEH 0 + +/* The default abi used by target. */ +#define DEFAULT_ABI SYSV_ABI + +/* The default TLS segment register used by target. */ +#define DEFAULT_TLS_SEG_REG \ + (TARGET_64BIT ? ADDR_SPACE_SEG_FS : ADDR_SPACE_SEG_GS) + +/* Subtargets may reset this to 1 in order to enable 96-bit long double + with the rounding mode forced to 53 bits. 
*/ +#define TARGET_96_ROUND_53_LONG_DOUBLE 0 + +#ifndef SUBTARGET_DRIVER_SELF_SPECS +# define SUBTARGET_DRIVER_SELF_SPECS "" +#endif + +#define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS + +/* -march=native handling only makes sense with compiler running on + an x86 or x86_64 chip. If changing this condition, also change + the condition in driver-i386.cc. */ +#if defined(__i386__) || defined(__x86_64__) +/* In driver-i386.cc. */ +extern const char *host_detect_local_cpu (int argc, const char **argv); +#define EXTRA_SPEC_FUNCTIONS \ + { "local_cpu_detect", host_detect_local_cpu }, +#define HAVE_LOCAL_CPU_DETECT +#endif + +#if TARGET_64BIT_DEFAULT +#define OPT_ARCH64 "!m32" +#define OPT_ARCH32 "m32" +#else +#define OPT_ARCH64 "m64|mx32" +#define OPT_ARCH32 "m64|mx32:;" +#endif + +/* Support for configure-time defaults of some command line options. + The order here is important so that -march doesn't squash the + tune or cpu values. */ +#define OPTION_DEFAULT_SPECS \ + {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ + {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ + {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ + {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ + {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ + {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ + {"arch", "%{!march=*:-march=%(VALUE)}"}, \ + {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \ + {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"}, + +/* Specs for the compiler proper */ + +#ifndef CC1_CPU_SPEC +#define CC1_CPU_SPEC_1 "" + +#ifndef HAVE_LOCAL_CPU_DETECT +#define CC1_CPU_SPEC CC1_CPU_SPEC_1 +#else +#define ARCH_ARG "%{" OPT_ARCH64 ":64;:32}" +#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \ +"%{march=native:%>march=native %:local_cpu_detect(arch " ARCH_ARG ") \ + 
%{!mtune=*:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}} \ +%{mtune=native:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}" +#endif +#endif + +/* Target CPU builtins. */ +#define TARGET_CPU_CPP_BUILTINS() ix86_target_macros () + +/* Target Pragmas. */ +#define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas () + +/* Target hooks for D language. */ +#define TARGET_D_CPU_VERSIONS ix86_d_target_versions +#define TARGET_D_REGISTER_CPU_TARGET_INFO ix86_d_register_target_info +#define TARGET_D_HAS_STDCALL_CONVENTION ix86_d_has_stdcall_convention + +#ifndef CC1_SPEC +#define CC1_SPEC "%(cc1_cpu) " +#endif + +/* This macro defines names of additional specifications to put in the + specs that can be used in various specifications like CC1_SPEC. Its + definition is an initializer with a subgrouping for each command option. + + Each subgrouping contains a string constant, that defines the + specification name, and a string constant that used by the GCC driver + program. + + Do not define this macro if it does not need to do anything. */ + +#ifndef SUBTARGET_EXTRA_SPECS +#define SUBTARGET_EXTRA_SPECS +#endif + +#define EXTRA_SPECS \ + { "cc1_cpu", CC1_CPU_SPEC }, \ + SUBTARGET_EXTRA_SPECS + + +/* Whether to allow x87 floating-point arithmetic on MODE (one of + SFmode, DFmode and XFmode) in the current excess precision + configuration. */ +#define X87_ENABLE_ARITH(MODE) \ + (ix86_unsafe_math_optimizations \ + || ix86_excess_precision == EXCESS_PRECISION_FAST \ + || (MODE) == XFmode) + +/* Likewise, whether to allow direct conversions from integer mode + IMODE (HImode, SImode or DImode) to MODE. */ +#define X87_ENABLE_FLOAT(MODE, IMODE) \ + (ix86_unsafe_math_optimizations \ + || ix86_excess_precision == EXCESS_PRECISION_FAST \ + || (MODE) == XFmode \ + || ((MODE) == DFmode && (IMODE) == SImode) \ + || (IMODE) == HImode) + +/* target machine storage layout */ + +#define SHORT_TYPE_SIZE 16 +#define INT_TYPE_SIZE 32 +#define LONG_TYPE_SIZE (TARGET_X32 ? 
32 : BITS_PER_WORD) +#define POINTER_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD) +#define LONG_LONG_TYPE_SIZE 64 +#define FLOAT_TYPE_SIZE 32 +#define DOUBLE_TYPE_SIZE 64 +#define LONG_DOUBLE_TYPE_SIZE \ + (TARGET_LONG_DOUBLE_64 ? 64 : (TARGET_LONG_DOUBLE_128 ? 128 : 80)) + +#define WIDEST_HARDWARE_FP_SIZE 80 + +#if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT +#define MAX_BITS_PER_WORD 64 +#else +#define MAX_BITS_PER_WORD 32 +#endif + +/* Define this if most significant byte of a word is the lowest numbered. */ +/* That is true on the 80386. */ + +#define BITS_BIG_ENDIAN 0 + +/* Define this if most significant byte of a word is the lowest numbered. */ +/* That is not true on the 80386. */ +#define BYTES_BIG_ENDIAN 0 + +/* Define this if most significant word of a multiword number is the lowest + numbered. */ +/* Not true for 80386 */ +#define WORDS_BIG_ENDIAN 0 + +/* Width of a word, in units (bytes). */ +#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4) + +#ifndef IN_LIBGCC2 +#define MIN_UNITS_PER_WORD 4 +#endif + +/* Allocation boundary (in *bits*) for storing arguments in argument list. */ +#define PARM_BOUNDARY BITS_PER_WORD + +/* Boundary (in *bits*) on which stack pointer should be aligned. */ +#define STACK_BOUNDARY (TARGET_64BIT_MS_ABI ? 128 : BITS_PER_WORD) + +/* Stack boundary of the main function guaranteed by OS. */ +#define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32) + +/* Minimum stack boundary. */ +#define MIN_STACK_BOUNDARY BITS_PER_WORD + +/* Boundary (in *bits*) on which the stack pointer prefers to be + aligned; the compiler cannot rely on having this alignment. */ +#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary + +/* It should be MIN_STACK_BOUNDARY. But we set it to 128 bits for + both 32bit and 64bit, to support codes that need 128 bit stack + alignment for SSE instructions, but can't realign the stack. */ +#define PREFERRED_STACK_BOUNDARY_DEFAULT \ + (TARGET_IAMCU ? 
MIN_STACK_BOUNDARY : 128) + +/* 1 if -mstackrealign should be turned on by default. It will + generate an alternate prologue and epilogue that realigns the + runtime stack if nessary. This supports mixing codes that keep a + 4-byte aligned stack, as specified by i386 psABI, with codes that + need a 16-byte aligned stack, as required by SSE instructions. */ +#define STACK_REALIGN_DEFAULT 0 + +/* Boundary (in *bits*) on which the incoming stack is aligned. */ +#define INCOMING_STACK_BOUNDARY ix86_incoming_stack_boundary + +/* According to Windows x64 software convention, the maximum stack allocatable + in the prologue is 4G - 8 bytes. Furthermore, there is a limited set of + instructions allowed to adjust the stack pointer in the epilog, forcing the + use of frame pointer for frames larger than 2 GB. This theorical limit + is reduced by 256, an over-estimated upper bound for the stack use by the + prologue. + We define only one threshold for both the prolog and the epilog. When the + frame size is larger than this threshold, we allocate the area to save SSE + regs, then save them, and then allocate the remaining. There is no SEH + unwind info for this later allocation. */ +#define SEH_MAX_FRAME_SIZE ((2U << 30) - 256) + +/* Target OS keeps a vector-aligned (128-bit, 16-byte) stack. This is + mandatory for the 64-bit ABI, and may or may not be true for other + operating systems. */ +#define TARGET_KEEPS_VECTOR_ALIGNED_STACK TARGET_64BIT + +/* Minimum allocation boundary for the code of a function. */ +#define FUNCTION_BOUNDARY 8 + +/* C++ stores the virtual bit in the lowest bit of function pointers. */ +#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn + +/* Minimum size in bits of the largest boundary to which any + and all fundamental data types supported by the hardware + might need to be aligned. No data type wants to be aligned + rounder than this. 
+ + Pentium+ prefers DFmode values to be aligned to 64 bit boundary + and Pentium Pro XFmode values at 128 bit boundaries. + + When increasing the maximum, also update + TARGET_ABSOLUTE_BIGGEST_ALIGNMENT. */ + +#define BIGGEST_ALIGNMENT \ + (TARGET_IAMCU ? 32 : (TARGET_AVX512F ? 512 : (TARGET_AVX ? 256 : 128))) + +/* Maximum stack alignment. */ +#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT + +/* Alignment value for attribute ((aligned)). It is a constant since + it is the part of the ABI. We shouldn't change it with -mavx. */ +#define ATTRIBUTE_ALIGNED_VALUE (TARGET_IAMCU ? 32 : 128) + +/* Decide whether a variable of mode MODE should be 128 bit aligned. */ +#define ALIGN_MODE_128(MODE) \ + ((MODE) == XFmode || SSE_REG_MODE_P (MODE)) + +/* The published ABIs say that doubles should be aligned on word + boundaries, so lower the alignment for structure fields unless + -malign-double is set. */ + +/* ??? Blah -- this macro is used directly by libobjc. Since it + supports no vector modes, cut out the complexity and fall back + on BIGGEST_FIELD_ALIGNMENT. */ +#ifdef IN_TARGET_LIBS +#ifdef __x86_64__ +#define BIGGEST_FIELD_ALIGNMENT 128 +#else +#define BIGGEST_FIELD_ALIGNMENT 32 +#endif +#else +#define ADJUST_FIELD_ALIGN(FIELD, TYPE, COMPUTED) \ + x86_field_alignment ((TYPE), (COMPUTED)) +#endif + +/* If defined, a C expression to compute the alignment for a static + variable. TYPE is the data type, and ALIGN is the alignment that + the object would ordinarily have. The value of this macro is used + instead of that alignment to align the object. + + If this macro is not defined, then ALIGN is used. + + One use of this macro is to increase alignment of medium-size + data to make it all fit in fewer cache lines. Another is to + cause character arrays to be word-aligned so that `strcpy' calls + that copy constants to character arrays can be done inline. 
*/ + +#define DATA_ALIGNMENT(TYPE, ALIGN) \ + ix86_data_alignment ((TYPE), (ALIGN), true) + +/* Similar to DATA_ALIGNMENT, but for the cases where the ABI mandates + some alignment increase, instead of optimization only purposes. E.g. + AMD x86-64 psABI says that variables with array type larger than 15 bytes + must be aligned to 16 byte boundaries. + + If this macro is not defined, then ALIGN is used. */ + +#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \ + ix86_data_alignment ((TYPE), (ALIGN), false) + +/* If defined, a C expression to compute the alignment for a local + variable. TYPE is the data type, and ALIGN is the alignment that + the object would ordinarily have. The value of this macro is used + instead of that alignment to align the object. + + If this macro is not defined, then ALIGN is used. + + One use of this macro is to increase alignment of medium-size + data to make it all fit in fewer cache lines. */ + +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ + ix86_local_alignment ((TYPE), VOIDmode, (ALIGN)) + +/* If defined, a C expression to compute the alignment for stack slot. + TYPE is the data type, MODE is the widest mode available, and ALIGN + is the alignment that the slot would ordinarily have. The value of + this macro is used instead of that alignment to align the slot. + + If this macro is not defined, then ALIGN is used when TYPE is NULL, + Otherwise, LOCAL_ALIGNMENT will be used. + + One use of this macro is to set alignment of stack slot to the + maximum alignment of all possible modes which the slot may have. */ + +#define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \ + ix86_local_alignment ((TYPE), (MODE), (ALIGN)) + +/* If defined, a C expression to compute the alignment for a local + variable DECL. + + If this macro is not defined, then + LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used. + + One use of this macro is to increase alignment of medium-size + data to make it all fit in fewer cache lines. 
*/ + +#define LOCAL_DECL_ALIGNMENT(DECL) \ + ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL)) + +/* If defined, a C expression to compute the minimum required alignment + for dynamic stack realignment purposes for EXP (a TYPE or DECL), + MODE, assuming normal alignment ALIGN. + + If this macro is not defined, then (ALIGN) will be used. */ + +#define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \ + ix86_minimum_alignment ((EXP), (MODE), (ALIGN)) + + +/* Set this nonzero if move instructions will actually fail to work + when given unaligned data. */ +#define STRICT_ALIGNMENT 0 + +/* If bit field type is int, don't let it cross an int, + and give entire struct the alignment of an int. */ +/* Required on the 386 since it doesn't have bit-field insns. */ +#define PCC_BITFIELD_TYPE_MATTERS 1 + +/* Standard register usage. */ + +/* This processor has special stack-like registers. See reg-stack.cc + for details. */ + +#define STACK_REGS + +#define IS_STACK_MODE(MODE) \ + (X87_FLOAT_MODE_P (MODE) \ + && (!(SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \ + || TARGET_MIX_SSE_I387)) + +/* Number of actual hardware registers. + The hardware registers are assigned numbers for the compiler + from 0 to just below FIRST_PSEUDO_REGISTER. + All registers that the compiler knows about must be given numbers, + even those that are not normally considered general registers. + + In the 80386 we give the 8 general purpose registers the numbers 0-7. + We number the floating point registers 8-15. + Note that registers 0-7 can be accessed as a short or int, + while only 0-3 may be used with byte `mov' instructions. + + Reg 16 does not correspond to any hardware register, but instead + appears in the RTL as an argument pointer prior to reload, and is + eliminated during reloading in favor of either the stack or frame + pointer. */ + +#define FIRST_PSEUDO_REGISTER FIRST_PSEUDO_REG + +/* Number of hardware registers that go into the DWARF-2 unwind info. 
+ If not defined, equals FIRST_PSEUDO_REGISTER. */ + +#define DWARF_FRAME_REGISTERS 17 + +/* 1 for registers that have pervasive standard uses + and are not available for the register allocator. + On the 80386, the stack pointer is such, as is the arg pointer. + + REX registers are disabled for 32bit targets in + TARGET_CONDITIONAL_REGISTER_USAGE. */ + +#define FIXED_REGISTERS \ +/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \ +{ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \ +/*arg,flags,fpsr,frame*/ \ + 1, 1, 1, 1, \ +/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \ + 0, 0, 0, 0, 0, 0, 0, 0, \ +/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \ + 0, 0, 0, 0, 0, 0, 0, 0, \ +/* r8, r9, r10, r11, r12, r13, r14, r15*/ \ + 0, 0, 0, 0, 0, 0, 0, 0, \ +/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \ + 0, 0, 0, 0, 0, 0, 0, 0, \ +/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \ + 0, 0, 0, 0, 0, 0, 0, 0, \ +/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \ + 0, 0, 0, 0, 0, 0, 0, 0, \ +/* k0, k1, k2, k3, k4, k5, k6, k7*/ \ + 0, 0, 0, 0, 0, 0, 0, 0 } + +/* 1 for registers not available across function calls. + These must include the FIXED_REGISTERS and also any + registers that can be used without being saved. + The latter must include the registers where values are returned + and the register where structure-value addresses are passed. + Aside from that, you can include as many other registers as you like. + + Value is set to 1 if the register is call used unconditionally. + Bit one is set if the register is call used on TARGET_32BIT ABI. + Bit two is set if the register is call used on TARGET_64BIT ABI. + Bit three is set if the register is call used on TARGET_64BIT_MS_ABI. + + Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE. */ + +#define CALL_USED_REGISTERS_MASK(IS_64BIT_MS_ABI) \ + ((IS_64BIT_MS_ABI) ? (1 << 3) : TARGET_64BIT ? 
(1 << 2) : (1 << 1)) + +#define CALL_USED_REGISTERS \ +/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \ +{ 1, 1, 1, 0, 4, 4, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ +/*arg,flags,fpsr,frame*/ \ + 1, 1, 1, 1, \ +/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \ + 1, 1, 1, 1, 1, 1, 6, 6, \ +/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \ + 1, 1, 1, 1, 1, 1, 1, 1, \ +/* r8, r9, r10, r11, r12, r13, r14, r15*/ \ + 1, 1, 1, 1, 2, 2, 2, 2, \ +/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \ + 6, 6, 6, 6, 6, 6, 6, 6, \ +/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \ + 1, 1, 1, 1, 1, 1, 1, 1, \ +/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \ + 1, 1, 1, 1, 1, 1, 1, 1, \ + /* k0, k1, k2, k3, k4, k5, k6, k7*/ \ + 1, 1, 1, 1, 1, 1, 1, 1 } + +/* Order in which to allocate registers. Each register must be + listed once, even those in FIXED_REGISTERS. List frame pointer + late and fixed registers last. Note that, in general, we prefer + registers listed in CALL_USED_REGISTERS, keeping the others + available for storage of persistent values. + + The ADJUST_REG_ALLOC_ORDER actually overwrite the order, + so this is just empty initializer for array. */ + +#define REG_ALLOC_ORDER \ +{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, \ + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \ + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75 } + +/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order + to be rearranged based on a particular function. When using sse math, + we want to allocate SSE before x87 registers and vice versa. 
*/ + +#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc () + + +#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL) + +#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \ + (TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT \ + && GENERAL_REGNO_P (REGNO) \ + && ((MODE) == XFmode || (MODE) == XCmode)) + +#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8) + +#define REGMODE_NATURAL_SIZE(MODE) ix86_regmode_natural_size (MODE) + +#define VALID_AVX256_REG_MODE(MODE) \ + ((MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \ + || (MODE) == V4DImode || (MODE) == V2TImode || (MODE) == V8SFmode \ + || (MODE) == V4DFmode || (MODE) == V16HFmode) + +#define VALID_AVX256_REG_OR_OI_MODE(MODE) \ + (VALID_AVX256_REG_MODE (MODE) || (MODE) == OImode) + +#define VALID_AVX512F_SCALAR_MODE(MODE) \ + ((MODE) == DImode || (MODE) == DFmode || (MODE) == SImode \ + || (MODE) == SFmode) + +#define VALID_AVX512FP16_SCALAR_MODE(MODE) \ + ((MODE) == HImode || (MODE) == HFmode) + +#define VALID_AVX512F_REG_MODE(MODE) \ + ((MODE) == V8DImode || (MODE) == V8DFmode || (MODE) == V64QImode \ + || (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \ + || (MODE) == V4TImode || (MODE) == V32HFmode) + +#define VALID_AVX512F_REG_OR_XI_MODE(MODE) \ + (VALID_AVX512F_REG_MODE (MODE) || (MODE) == XImode) + +#define VALID_AVX512VL_128_REG_MODE(MODE) \ + ((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \ + || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \ + || (MODE) == TFmode || (MODE) == V1TImode || (MODE) == V8HFmode \ + || (MODE) == TImode) + +#define VALID_AVX512FP16_REG_MODE(MODE) \ + ((MODE) == V8HFmode || (MODE) == V16HFmode || (MODE) == V32HFmode \ + || (MODE) == V2HFmode) + +#define VALID_SSE2_REG_MODE(MODE) \ + ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \ + || (MODE) == V8HFmode || (MODE) == V4HFmode || (MODE) == V2HFmode \ + || (MODE) == V4QImode || (MODE) == V2HImode 
|| (MODE) == V1SImode \ + || (MODE) == V2DImode || (MODE) == V2QImode || (MODE) == DFmode \ + || (MODE) == HFmode) + +#define VALID_SSE_REG_MODE(MODE) \ + ((MODE) == V1TImode || (MODE) == TImode \ + || (MODE) == V4SFmode || (MODE) == V4SImode \ + || (MODE) == SFmode || (MODE) == TFmode || (MODE) == TDmode) + +#define VALID_MMX_REG_MODE_3DNOW(MODE) \ + ((MODE) == V2SFmode || (MODE) == SFmode) + +/* To match ia32 psABI, V4HFmode should be added here. */ +#define VALID_MMX_REG_MODE(MODE) \ + ((MODE) == V1DImode || (MODE) == DImode \ + || (MODE) == V2SImode || (MODE) == SImode \ + || (MODE) == V4HImode || (MODE) == V8QImode \ + || (MODE) == V4HFmode) + +#define VALID_MASK_REG_MODE(MODE) ((MODE) == HImode || (MODE) == QImode) + +#define VALID_MASK_AVX512BW_MODE(MODE) ((MODE) == SImode || (MODE) == DImode) + +#define VALID_FP_MODE_P(MODE) \ + ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \ + || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode) + +#define VALID_INT_MODE_P(MODE) \ + ((MODE) == QImode || (MODE) == HImode \ + || (MODE) == SImode || (MODE) == DImode \ + || (MODE) == CQImode || (MODE) == CHImode \ + || (MODE) == CSImode || (MODE) == CDImode \ + || (MODE) == SDmode || (MODE) == DDmode \ + || (MODE) == HFmode || (MODE) == HCmode \ + || (MODE) == V2HImode || (MODE) == V2HFmode \ + || (MODE) == V1SImode || (MODE) == V4QImode || (MODE) == V2QImode \ + || (TARGET_64BIT \ + && ((MODE) == TImode || (MODE) == CTImode \ + || (MODE) == TFmode || (MODE) == TCmode \ + || (MODE) == V8QImode || (MODE) == V4HImode \ + || (MODE) == V2SImode || (MODE) == TDmode))) + +/* Return true for modes passed in SSE registers. 
*/ +#define SSE_REG_MODE_P(MODE) \ + ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \ + || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \ + || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \ + || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \ + || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode \ + || (MODE) == V2TImode || (MODE) == V8DImode || (MODE) == V64QImode \ + || (MODE) == V16SImode || (MODE) == V32HImode || (MODE) == V8DFmode \ + || (MODE) == V16SFmode || (MODE) == V32HFmode || (MODE) == V16HFmode \ + || (MODE) == V8HFmode) + +#define X87_FLOAT_MODE_P(MODE) \ + (TARGET_80387 && ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode)) + +#define SSE_FLOAT_MODE_P(MODE) \ + ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode)) + +#define SSE_FLOAT_MODE_SSEMATH_OR_HF_P(MODE) \ + ((SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \ + || (TARGET_AVX512FP16 && (MODE) == HFmode)) + +#define FMA4_VEC_FLOAT_MODE_P(MODE) \ + (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \ + || (MODE) == V8SFmode || (MODE) == V4DFmode)) + +#define VALID_BCST_MODE_P(MODE) \ + ((MODE) == SFmode || (MODE) == DFmode \ + || (MODE) == SImode || (MODE) == DImode \ + || (MODE) == HFmode) + +/* It is possible to write patterns to move flags; but until someone + does it, */ +#define AVOID_CCMODE_COPIES + +/* Specify the modes required to caller save a given hard regno. + We do this on i386 to prevent flags from being saved at all. + + Kill any attempts to combine saving of modes. */ + +#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ + (CC_REGNO_P (REGNO) ? VOIDmode \ + : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \ + : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), NULL) \ + : (MODE) == HImode && !((GENERAL_REGNO_P (REGNO) \ + && TARGET_PARTIAL_REG_STALL) \ + || MASK_REGNO_P (REGNO)) ? 
SImode \ + : (MODE) == QImode && !(ANY_QI_REGNO_P (REGNO) \ + || MASK_REGNO_P (REGNO)) ? SImode \ + : (MODE)) + +/* Specify the registers used for certain standard purposes. + The values of these macros are register numbers. */ + +/* on the 386 the pc register is %eip, and is not usable as a general + register. The ordinary mov instructions won't work */ +/* #define PC_REGNUM */ + +/* Base register for access to arguments of the function. */ +#define ARG_POINTER_REGNUM ARGP_REG + +/* Register to use for pushing function arguments. */ +#define STACK_POINTER_REGNUM SP_REG + +/* Base register for access to local variables of the function. */ +#define FRAME_POINTER_REGNUM FRAME_REG +#define HARD_FRAME_POINTER_REGNUM BP_REG + +#define FIRST_INT_REG AX_REG +#define LAST_INT_REG SP_REG + +#define FIRST_QI_REG AX_REG +#define LAST_QI_REG BX_REG + +/* First & last stack-like regs */ +#define FIRST_STACK_REG ST0_REG +#define LAST_STACK_REG ST7_REG + +#define FIRST_SSE_REG XMM0_REG +#define LAST_SSE_REG XMM7_REG + +#define FIRST_MMX_REG MM0_REG +#define LAST_MMX_REG MM7_REG + +#define FIRST_REX_INT_REG R8_REG +#define LAST_REX_INT_REG R15_REG + +#define FIRST_REX_SSE_REG XMM8_REG +#define LAST_REX_SSE_REG XMM15_REG + +#define FIRST_EXT_REX_SSE_REG XMM16_REG +#define LAST_EXT_REX_SSE_REG XMM31_REG + +#define FIRST_MASK_REG MASK0_REG +#define LAST_MASK_REG MASK7_REG + +/* Override this in other tm.h files to cope with various OS lossage + requiring a frame pointer. */ +#ifndef SUBTARGET_FRAME_POINTER_REQUIRED +#define SUBTARGET_FRAME_POINTER_REQUIRED 0 +#endif + +/* Define the shadow offset for asan. Other OS's can override in the + respective tm.h files. */ +#ifndef SUBTARGET_SHADOW_OFFSET +#define SUBTARGET_SHADOW_OFFSET \ + (TARGET_LP64 ? HOST_WIDE_INT_C (0x7fff8000) : HOST_WIDE_INT_1 << 29) +#endif + +/* Make sure we can access arbitrary call frames. 
*/ +#define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses () + +/* Register to hold the addressing base for position independent + code access to data items. We don't use PIC pointer for 64bit + mode. Define the regnum to dummy value to prevent gcc from + pessimizing code dealing with EBX. + + To avoid clobbering a call-saved register unnecessarily, we renumber + the pic register when possible. The change is visible after the + prologue has been emitted. */ + +#define REAL_PIC_OFFSET_TABLE_REGNUM (TARGET_64BIT ? R15_REG : BX_REG) + +#define PIC_OFFSET_TABLE_REGNUM \ + (ix86_use_pseudo_pic_reg () \ + ? (pic_offset_table_rtx \ + ? INVALID_REGNUM \ + : REAL_PIC_OFFSET_TABLE_REGNUM) \ + : INVALID_REGNUM) + +#define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_" + +/* This is overridden by . */ +#define MS_AGGREGATE_RETURN 0 + +#define KEEP_AGGREGATE_RETURN_POINTER 0 + +/* Define the classes of registers for register constraints in the + machine description. Also define ranges of constants. + + One of the classes must always be named ALL_REGS and include all hard regs. + If there is more than one class, another class must be named NO_REGS + and contain no registers. + + The name GENERAL_REGS must be the name of a class (or an alias for + another name such as ALL_REGS). This is the class of registers + that is allowed by "g" or "r" in a register constraint. + Also, registers outside this class are allocated only when + instructions express preferences for them. + + The classes must be numbered in nondecreasing order; that is, + a larger-numbered class must never be contained completely + in a smaller-numbered class. This is why CLOBBERED_REGS class + is listed early, even though in 64-bit mode it contains more + registers than just %eax, %ecx, %edx. + + For any two classes, it is very desirable that there be another + class that represents their union. + + The flags and fpsr registers are in no class. 
*/ + +enum reg_class +{ + NO_REGS, + AREG, DREG, CREG, BREG, SIREG, DIREG, + AD_REGS, /* %eax/%edx for DImode */ + CLOBBERED_REGS, /* call-clobbered integer registers */ + Q_REGS, /* %eax %ebx %ecx %edx */ + NON_Q_REGS, /* %esi %edi %ebp %esp */ + TLS_GOTBASE_REGS, /* %ebx %ecx %edx %esi %edi %ebp */ + INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */ + LEGACY_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */ + GENERAL_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp + %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */ + FP_TOP_REG, FP_SECOND_REG, /* %st(0) %st(1) */ + FLOAT_REGS, + SSE_FIRST_REG, + NO_REX_SSE_REGS, + SSE_REGS, + ALL_SSE_REGS, + MMX_REGS, + FLOAT_SSE_REGS, + FLOAT_INT_REGS, + INT_SSE_REGS, + FLOAT_INT_SSE_REGS, + MASK_REGS, + ALL_MASK_REGS, + INT_MASK_REGS, + ALL_REGS, + LIM_REG_CLASSES +}; + +#define N_REG_CLASSES ((int) LIM_REG_CLASSES) + +#define INTEGER_CLASS_P(CLASS) \ + reg_class_subset_p ((CLASS), GENERAL_REGS) +#define FLOAT_CLASS_P(CLASS) \ + reg_class_subset_p ((CLASS), FLOAT_REGS) +#define SSE_CLASS_P(CLASS) \ + reg_class_subset_p ((CLASS), ALL_SSE_REGS) +#define INT_SSE_CLASS_P(CLASS) \ + reg_class_subset_p ((CLASS), INT_SSE_REGS) +#define MMX_CLASS_P(CLASS) \ + ((CLASS) == MMX_REGS) +#define MASK_CLASS_P(CLASS) \ + reg_class_subset_p ((CLASS), ALL_MASK_REGS) +#define MAYBE_INTEGER_CLASS_P(CLASS) \ + reg_classes_intersect_p ((CLASS), GENERAL_REGS) +#define MAYBE_FLOAT_CLASS_P(CLASS) \ + reg_classes_intersect_p ((CLASS), FLOAT_REGS) +#define MAYBE_SSE_CLASS_P(CLASS) \ + reg_classes_intersect_p ((CLASS), ALL_SSE_REGS) +#define MAYBE_MMX_CLASS_P(CLASS) \ + reg_classes_intersect_p ((CLASS), MMX_REGS) +#define MAYBE_MASK_CLASS_P(CLASS) \ + reg_classes_intersect_p ((CLASS), ALL_MASK_REGS) + +#define Q_CLASS_P(CLASS) \ + reg_class_subset_p ((CLASS), Q_REGS) + +#define MAYBE_NON_Q_CLASS_P(CLASS) \ + reg_classes_intersect_p ((CLASS), NON_Q_REGS) + +/* Give names of register classes as strings for dump file. 
*/ + +#define REG_CLASS_NAMES \ +{ "NO_REGS", \ + "AREG", "DREG", "CREG", "BREG", \ + "SIREG", "DIREG", \ + "AD_REGS", \ + "CLOBBERED_REGS", \ + "Q_REGS", "NON_Q_REGS", \ + "TLS_GOTBASE_REGS", \ + "INDEX_REGS", \ + "LEGACY_REGS", \ + "GENERAL_REGS", \ + "FP_TOP_REG", "FP_SECOND_REG", \ + "FLOAT_REGS", \ + "SSE_FIRST_REG", \ + "NO_REX_SSE_REGS", \ + "SSE_REGS", \ + "ALL_SSE_REGS", \ + "MMX_REGS", \ + "FLOAT_SSE_REGS", \ + "FLOAT_INT_REGS", \ + "INT_SSE_REGS", \ + "FLOAT_INT_SSE_REGS", \ + "MASK_REGS", \ + "ALL_MASK_REGS", \ + "INT_MASK_REGS", \ + "ALL_REGS" } + +/* Define which registers fit in which classes. This is an initializer + for a vector of HARD_REG_SET of length N_REG_CLASSES. + + Note that CLOBBERED_REGS are calculated by + TARGET_CONDITIONAL_REGISTER_USAGE. */ + +#define REG_CLASS_CONTENTS \ +{ { 0x0, 0x0, 0x0 }, /* NO_REGS */ \ + { 0x01, 0x0, 0x0 }, /* AREG */ \ + { 0x02, 0x0, 0x0 }, /* DREG */ \ + { 0x04, 0x0, 0x0 }, /* CREG */ \ + { 0x08, 0x0, 0x0 }, /* BREG */ \ + { 0x10, 0x0, 0x0 }, /* SIREG */ \ + { 0x20, 0x0, 0x0 }, /* DIREG */ \ + { 0x03, 0x0, 0x0 }, /* AD_REGS */ \ + { 0x07, 0x0, 0x0 }, /* CLOBBERED_REGS */ \ + { 0x0f, 0x0, 0x0 }, /* Q_REGS */ \ + { 0x900f0, 0x0, 0x0 }, /* NON_Q_REGS */ \ + { 0x7e, 0xff0, 0x0 }, /* TLS_GOTBASE_REGS */ \ + { 0x7f, 0xff0, 0x0 }, /* INDEX_REGS */ \ + { 0x900ff, 0x0, 0x0 }, /* LEGACY_REGS */ \ + { 0x900ff, 0xff0, 0x0 }, /* GENERAL_REGS */ \ + { 0x100, 0x0, 0x0 }, /* FP_TOP_REG */ \ + { 0x200, 0x0, 0x0 }, /* FP_SECOND_REG */ \ + { 0xff00, 0x0, 0x0 }, /* FLOAT_REGS */ \ + { 0x100000, 0x0, 0x0 }, /* SSE_FIRST_REG */ \ + { 0xff00000, 0x0, 0x0 }, /* NO_REX_SSE_REGS */ \ + { 0xff00000, 0xff000, 0x0 }, /* SSE_REGS */ \ + { 0xff00000, 0xfffff000, 0xf }, /* ALL_SSE_REGS */ \ +{ 0xf0000000, 0xf, 0x0 }, /* MMX_REGS */ \ + { 0xff0ff00, 0xfffff000, 0xf }, /* FLOAT_SSE_REGS */ \ + { 0x9ffff, 0xff0, 0x0 }, /* FLOAT_INT_REGS */ \ + { 0xff900ff, 0xfffffff0, 0xf }, /* INT_SSE_REGS */ \ + { 0xff9ffff, 0xfffffff0, 0xf }, /* 
FLOAT_INT_SSE_REGS */ \ + { 0x0, 0x0, 0xfe0 }, /* MASK_REGS */ \ + { 0x0, 0x0, 0xff0 }, /* ALL_MASK_REGS */ \ + { 0x900ff, 0xff0, 0xff0 }, /* INT_MASK_REGS */ \ +{ 0xffffffff, 0xffffffff, 0xfff } /* ALL_REGS */ \ +} + +/* The same information, inverted: + Return the class number of the smallest class containing + reg number REGNO. This could be a conditional expression + or could index an array. */ + +#define REGNO_REG_CLASS(REGNO) (regclass_map[(REGNO)]) + +/* When this hook returns true for MODE, the compiler allows + registers explicitly used in the rtl to be used as spill registers + but prevents the compiler from extending the lifetime of these + registers. */ +#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true + +#define QI_REG_P(X) (REG_P (X) && QI_REGNO_P (REGNO (X))) +#define QI_REGNO_P(N) IN_RANGE ((N), FIRST_QI_REG, LAST_QI_REG) + +#define LEGACY_INT_REG_P(X) (REG_P (X) && LEGACY_INT_REGNO_P (REGNO (X))) +#define LEGACY_INT_REGNO_P(N) (IN_RANGE ((N), FIRST_INT_REG, LAST_INT_REG)) + +#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X))) +#define REX_INT_REGNO_P(N) \ + IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG) + +#define GENERAL_REG_P(X) (REG_P (X) && GENERAL_REGNO_P (REGNO (X))) +#define GENERAL_REGNO_P(N) \ + (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N)) + +#define ANY_QI_REG_P(X) (REG_P (X) && ANY_QI_REGNO_P (REGNO (X))) +#define ANY_QI_REGNO_P(N) \ + (TARGET_64BIT ? 
GENERAL_REGNO_P (N) : QI_REGNO_P (N)) + +#define STACK_REG_P(X) (REG_P (X) && STACK_REGNO_P (REGNO (X))) +#define STACK_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG) + +#define SSE_REG_P(X) (REG_P (X) && SSE_REGNO_P (REGNO (X))) +#define SSE_REGNO_P(N) \ + (LEGACY_SSE_REGNO_P (N) \ + || REX_SSE_REGNO_P (N) \ + || EXT_REX_SSE_REGNO_P (N)) + +#define LEGACY_SSE_REGNO_P(N) \ + IN_RANGE ((N), FIRST_SSE_REG, LAST_SSE_REG) + +#define REX_SSE_REGNO_P(N) \ + IN_RANGE ((N), FIRST_REX_SSE_REG, LAST_REX_SSE_REG) + +#define EXT_REX_SSE_REG_P(X) (REG_P (X) && EXT_REX_SSE_REGNO_P (REGNO (X))) + +#define EXT_REX_SSE_REGNO_P(N) \ + IN_RANGE ((N), FIRST_EXT_REX_SSE_REG, LAST_EXT_REX_SSE_REG) + +#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X))) +#define ANY_FP_REGNO_P(N) (STACK_REGNO_P (N) || SSE_REGNO_P (N)) + +#define MASK_REG_P(X) (REG_P (X) && MASK_REGNO_P (REGNO (X))) +#define MASK_REGNO_P(N) IN_RANGE ((N), FIRST_MASK_REG, LAST_MASK_REG) +#define MASK_PAIR_REGNO_P(N) ((((N) - FIRST_MASK_REG) & 1) == 0) + +#define MMX_REG_P(X) (REG_P (X) && MMX_REGNO_P (REGNO (X))) +#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG) + +#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X))) +#define CC_REGNO_P(X) ((X) == FLAGS_REG) + +#define MOD4_SSE_REG_P(X) (REG_P (X) && MOD4_SSE_REGNO_P (REGNO (X))) +#define MOD4_SSE_REGNO_P(N) ((N) == XMM0_REG \ + || (N) == XMM4_REG \ + || (N) == XMM8_REG \ + || (N) == XMM12_REG \ + || (N) == XMM16_REG \ + || (N) == XMM20_REG \ + || (N) == XMM24_REG \ + || (N) == XMM28_REG) + +/* First floating point reg */ +#define FIRST_FLOAT_REG FIRST_STACK_REG +#define STACK_TOP_P(X) (REG_P (X) && REGNO (X) == FIRST_FLOAT_REG) + +#define GET_SSE_REGNO(N) \ + ((N) < 8 ? FIRST_SSE_REG + (N) \ + : (N) < 16 ? FIRST_REX_SSE_REG + (N) - 8 \ + : FIRST_EXT_REX_SSE_REG + (N) - 16) + +/* The class value for index registers, and the one for base regs. 
*/ + +#define INDEX_REG_CLASS INDEX_REGS +#define BASE_REG_CLASS GENERAL_REGS + +/* Stack layout; function entry, exit and calling. */ + +/* Define this if pushing a word on the stack + makes the stack pointer a smaller address. */ +#define STACK_GROWS_DOWNWARD 1 + +/* Define this to nonzero if the nominal address of the stack frame + is at the high-address end of the local variables; + that is, each additional local variable allocated + goes at a more negative offset in the frame. */ +#define FRAME_GROWS_DOWNWARD 1 + +#define PUSH_ROUNDING(BYTES) ix86_push_rounding (BYTES) + +/* If defined, the maximum amount of space required for outgoing arguments + will be computed and placed into the variable `crtl->outgoing_args_size'. + No space will be pushed onto the stack for each call; instead, the + function prologue should increase the stack frame size by this amount. + + In 32bit mode enabling argument accumulation results in about 5% code size + growth because move instructions are less compact than push. In 64bit + mode the difference is less drastic but visible. + + FIXME: Unlike earlier implementations, the size of unwind info seems to + actually grow with accumulation. Is that because accumulated args + unwind info became unnecesarily bloated? + + With the 64-bit MS ABI, we can generate correct code with or without + accumulated args, but because of OUTGOING_REG_PARM_STACK_SPACE the code + generated without accumulated args is terrible. + + If stack probes are required, the space used for large function + arguments on the stack must also be probed, so enable + -maccumulate-outgoing-args so this happens in the prologue. + + We must use argument accumulation in interrupt function if stack + may be realigned to avoid DRAP. 
*/ + +#define ACCUMULATE_OUTGOING_ARGS \ + ((TARGET_ACCUMULATE_OUTGOING_ARGS \ + && optimize_function_for_speed_p (cfun)) \ + || (cfun->machine->func_type != TYPE_NORMAL \ + && crtl->stack_realign_needed) \ + || TARGET_STACK_PROBE \ + || TARGET_64BIT_MS_ABI \ + || (TARGET_MACHO && crtl->profile)) + +/* We want the stack and args grow in opposite directions, even if + targetm.calls.push_argument returns false. */ +#define PUSH_ARGS_REVERSED 1 + +/* Offset of first parameter from the argument pointer register value. */ +#define FIRST_PARM_OFFSET(FNDECL) 0 + +/* Define this macro if functions should assume that stack space has been + allocated for arguments even when their values are passed in registers. + + The value of this macro is the size, in bytes, of the area reserved for + arguments passed in registers for the function represented by FNDECL. + + This space can be allocated by the caller, or be a part of the + machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says + which. */ +#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL) + +#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \ + (TARGET_64BIT && ix86_function_type_abi (FNTYPE) == MS_ABI) + +/* Define how to find the value returned by a library function + assuming the value has mode MODE. */ + +#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE) + +/* Define the size of the result block used for communication between + untyped_call and untyped_return. The block contains a DImode value + followed by the block used by fnsave and frstor. */ + +#define APPLY_RESULT_SIZE (8+108) + +/* 1 if N is a possible register number for function argument passing. */ +#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N) + +/* Define a data type for recording info about an argument list + during the scan of that argument list. 
This data type should + hold all necessary information about the function itself + and about the args processed so far, enough to enable macros + such as FUNCTION_ARG to determine where the next arg should go. */ + +typedef struct ix86_args { + int words; /* # words passed so far */ + int nregs; /* # registers available for passing */ + int regno; /* next available register number */ + int fastcall; /* fastcall or thiscall calling convention + is used */ + int sse_words; /* # sse words passed so far */ + int sse_nregs; /* # sse registers available for passing */ + int warn_avx512f; /* True when we want to warn + about AVX512F ABI. */ + int warn_avx; /* True when we want to warn about AVX ABI. */ + int warn_sse; /* True when we want to warn about SSE ABI. */ + int warn_mmx; /* True when we want to warn about MMX ABI. */ + int warn_empty; /* True when we want to warn about empty classes + passing ABI change. */ + int sse_regno; /* next available sse register number */ + int mmx_words; /* # mmx words passed so far */ + int mmx_nregs; /* # mmx registers available for passing */ + int mmx_regno; /* next available mmx register number */ + int maybe_vaarg; /* true for calls to possibly vardic fncts. */ + int caller; /* true if it is caller. */ + int float_in_sse; /* Set to 1 or 2 for 32bit targets if + SFmode/DFmode arguments should be passed + in SSE registers. Otherwise 0. */ + int stdarg; /* Set to 1 if function is stdarg. */ + enum calling_abi call_abi; /* Set to SYSV_ABI for sysv abi. Otherwise + MS_ABI for ms abi. */ + tree decl; /* Callee decl. */ +} CUMULATIVE_ARGS; + +/* Initialize a variable CUM of type CUMULATIVE_ARGS + for a call to a function whose data type is FNTYPE. + For a library call, FNTYPE is 0. 
*/ + +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ + init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \ + (N_NAMED_ARGS) != -1) + +/* Output assembler code to FILE to increment profiler label # LABELNO + for profiling a function entry. */ + +#define FUNCTION_PROFILER(FILE, LABELNO) \ + x86_function_profiler ((FILE), (LABELNO)) + +#define MCOUNT_NAME "_mcount" + +#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__" + +#define PROFILE_COUNT_REGISTER "edx" + +/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, + the stack pointer does not matter. The value is tested only in + functions that have frame pointers. + No definition is equivalent to always zero. */ +/* Note on the 386 it might be more efficient not to define this since + we have to restore it ourselves from the frame pointer, in order to + use pop */ + +#define EXIT_IGNORE_STACK 1 + +/* Define this macro as a C expression that is nonzero for registers + used by the epilogue or the `return' pattern. */ + +#define EPILOGUE_USES(REGNO) ix86_epilogue_uses (REGNO) + +/* Output assembler code for a block containing the constant parts + of a trampoline, leaving space for the variable parts. */ + +/* On the 386, the trampoline contains two instructions: + mov #STATIC,ecx + jmp FUNCTION + The trampoline is generated entirely at runtime. The operand of JMP + is the address of FUNCTION relative to the instruction following the + JMP (which is 5 bytes long). */ + +/* Length in units of the trampoline for entering a nested function. */ + +#define TRAMPOLINE_SIZE (TARGET_64BIT ? 28 : 14) + +/* Definitions for register eliminations. + + This is an array of structures. Each structure initializes one pair + of eliminable registers. The "from" register number is given first, + followed by "to". Eliminations of the same "from" register are listed + in order of preference. + + There are two registers that can always be eliminated on the i386. 
+ The frame pointer and the arg pointer can be replaced by either the + hard frame pointer or to the stack pointer, depending upon the + circumstances. The hard frame pointer is not used before reload and + so it is not eligible for elimination. */ + +#define ELIMINABLE_REGS \ +{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \ + +/* Define the offset between two registers, one to be eliminated, and the other + its replacement, at the start of a routine. */ + +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ + ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO))) + +/* Addressing modes, and classification of registers for them. */ + +/* Macros to check register numbers against specific register classes. */ + +/* These assume that REGNO is a hard or pseudo reg number. + They give nonzero only if REGNO is a hard reg of the suitable class + or a pseudo reg currently allocated to a suitable hard reg. + Since they use reg_renumber, they are safe only once reg_renumber + has been allocated, which happens in reginfo.cc during register + allocation. */ + +#define REGNO_OK_FOR_INDEX_P(REGNO) \ + ((REGNO) < STACK_POINTER_REGNUM \ + || REX_INT_REGNO_P (REGNO) \ + || (unsigned) reg_renumber[(REGNO)] < STACK_POINTER_REGNUM \ + || REX_INT_REGNO_P ((unsigned) reg_renumber[(REGNO)])) + +#define REGNO_OK_FOR_BASE_P(REGNO) \ + (GENERAL_REGNO_P (REGNO) \ + || (REGNO) == ARG_POINTER_REGNUM \ + || (REGNO) == FRAME_POINTER_REGNUM \ + || GENERAL_REGNO_P ((unsigned) reg_renumber[(REGNO)])) + +/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx + and check its validity for a certain class. + We have two alternate definitions for each of them. + The usual definition accepts all pseudo regs; the other rejects + them unless they have been allocated suitable hard regs. 
+ The symbol REG_OK_STRICT causes the latter definition to be used. + + Most source files want to accept pseudo regs in the hope that + they will get allocated to the class that the insn wants them to be in. + Source files for reload pass need to be strict. + After reload, it makes no difference, since pseudo regs have + been eliminated by then. */ + + +/* Non strict versions, pseudos are ok. */ +#define REG_OK_FOR_INDEX_NONSTRICT_P(X) \ + (REGNO (X) < STACK_POINTER_REGNUM \ + || REX_INT_REGNO_P (REGNO (X)) \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER) + +#define REG_OK_FOR_BASE_NONSTRICT_P(X) \ + (GENERAL_REGNO_P (REGNO (X)) \ + || REGNO (X) == ARG_POINTER_REGNUM \ + || REGNO (X) == FRAME_POINTER_REGNUM \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER) + +/* Strict versions, hard registers only */ +#define REG_OK_FOR_INDEX_STRICT_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) +#define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) + +#ifndef REG_OK_STRICT +#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X) +#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X) + +#else +#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X) +#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X) +#endif + +/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression + that is a valid memory address for an instruction. + The MODE argument is the machine mode for the MEM expression + that wants to use this address. + + The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P, + except for CONSTANT_ADDRESS_P which is usually machine-independent. + + See legitimize_pic_address in i386.cc for details as to what + constitutes a legitimate address when -fpic is used. */ + +#define MAX_REGS_PER_ADDRESS 2 + +#define CONSTANT_ADDRESS_P(X) constant_address_p (X) + +/* If defined, a C expression to determine the base term of address X. + This macro is used in only one place: `find_base_term' in alias.cc. 
+ + It is always safe for this macro to not be defined. It exists so + that alias analysis can understand machine-dependent addresses. + + The typical use of this macro is to handle addresses containing + a label_ref or symbol_ref within an UNSPEC. */ + +#define FIND_BASE_TERM(X) ix86_find_base_term (X) + +/* Nonzero if the constant value X is a legitimate general operand + when generating PIC code. It is given that flag_pic is on and + that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ + +#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X) + +#define STRIP_UNARY(X) (UNARY_P (X) ? XEXP (X, 0) : X) + +#define SYMBOLIC_CONST(X) \ + (GET_CODE (X) == SYMBOL_REF \ + || GET_CODE (X) == LABEL_REF \ + || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X))) + +/* Max number of args passed in registers. If this is more than 3, we will + have problems with ebx (register #4), since it is a caller save register and + is also used as the pic register in ELF. So for now, don't allow more than + 3 registers to be passed in registers. */ + +/* Abi specific values for REGPARM_MAX and SSE_REGPARM_MAX */ +#define X86_64_REGPARM_MAX 6 +#define X86_64_MS_REGPARM_MAX 4 + +#define X86_32_REGPARM_MAX 3 + +#define REGPARM_MAX \ + (TARGET_64BIT \ + ? (TARGET_64BIT_MS_ABI \ + ? X86_64_MS_REGPARM_MAX \ + : X86_64_REGPARM_MAX) \ + : X86_32_REGPARM_MAX) + +#define X86_64_SSE_REGPARM_MAX 8 +#define X86_64_MS_SSE_REGPARM_MAX 4 + +#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0) + +#define SSE_REGPARM_MAX \ + (TARGET_64BIT \ + ? (TARGET_64BIT_MS_ABI \ + ? X86_64_MS_SSE_REGPARM_MAX \ + : X86_64_SSE_REGPARM_MAX) \ + : X86_32_SSE_REGPARM_MAX) + +#define X86_32_MMX_REGPARM_MAX (TARGET_MMX ? (TARGET_MACHO ? 0 : 3) : 0) + +#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : X86_32_MMX_REGPARM_MAX) + +/* Specify the machine mode that this machine uses + for the index in the tablejump instruction. 
*/ +#define CASE_VECTOR_MODE \ + (!TARGET_LP64 || (flag_pic && ix86_cmodel != CM_LARGE_PIC) ? SImode : DImode) + +/* Define this as 1 if `char' should by default be signed; else as 0. */ +#define DEFAULT_SIGNED_CHAR 1 + +/* The constant maximum number of bytes that a single instruction can + move quickly between memory and registers or between two memory + locations. */ +#define MAX_MOVE_MAX 64 + +/* Max number of bytes we can move from memory to memory in one + reasonably fast instruction, as opposed to MOVE_MAX_PIECES which + is the number of bytes at a time which we can move efficiently. + MOVE_MAX_PIECES defaults to MOVE_MAX. */ + +#define MOVE_MAX \ + ((TARGET_AVX512F \ + && (ix86_move_max == PVW_AVX512 \ + || ix86_store_max == PVW_AVX512)) \ + ? 64 \ + : ((TARGET_AVX \ + && (ix86_move_max >= PVW_AVX256 \ + || ix86_store_max >= PVW_AVX256)) \ + ? 32 \ + : ((TARGET_SSE2 \ + && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \ + && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \ + ? 16 : UNITS_PER_WORD))) + +/* STORE_MAX_PIECES is the number of bytes at a time that we can store + efficiently. Allow 16/32/64 bytes only if inter-unit move is enabled + since vec_duplicate enabled by inter-unit move is used to implement + store_by_pieces of 16/32/64 bytes. */ +#define STORE_MAX_PIECES \ + (TARGET_INTER_UNIT_MOVES_TO_VEC \ + ? ((TARGET_AVX512F && ix86_store_max == PVW_AVX512) \ + ? 64 \ + : ((TARGET_AVX \ + && ix86_store_max >= PVW_AVX256) \ + ? 32 \ + : ((TARGET_SSE2 \ + && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \ + ? 16 : UNITS_PER_WORD))) \ + : UNITS_PER_WORD) + +/* If a memory-to-memory move would take MOVE_RATIO or more simple + move-instruction pairs, we will do a cpymem or libcall instead. + Increasing the value will always make code faster, but eventually + incurs high cost in increased code size. + + If you don't define this, a reasonable default is used. */ + +#define MOVE_RATIO(speed) ((speed) ? 
ix86_cost->move_ratio : 3) + +/* If a clear memory operation would take CLEAR_RATIO or more simple + move-instruction sequences, we will do a clrmem or libcall instead. */ + +#define CLEAR_RATIO(speed) ((speed) ? ix86_cost->clear_ratio : 2) + +/* Define if shifts truncate the shift count which implies one can + omit a sign-extension or zero-extension of a shift count. + + On i386, shifts do truncate the count. But bit test instructions + take the modulo of the bit offset operand. */ + +/* #define SHIFT_COUNT_TRUNCATED */ + +/* A macro to update M and UNSIGNEDP when an object whose type is + TYPE and which has the specified mode and signedness is to be + stored in a register. This macro is only called when TYPE is a + scalar type. + + On i386 it is sometimes useful to promote HImode and QImode + quantities to SImode. The choice depends on target type. */ + +#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ +do { \ + if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS) \ + || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS)) \ + (MODE) = SImode; \ +} while (0) + +/* Specify the machine mode that pointers have. + After generation of rtl, the compiler makes no further distinction + between pointers and any other objects of this machine mode. */ +#define Pmode (ix86_pmode == PMODE_DI ? DImode : SImode) + +/* Supply a definition of STACK_SAVEAREA_MODE for emit_stack_save. + NONLOCAL needs space to save both shadow stack and stack pointers. + + FIXME: We only need to save and restore stack pointer in ptr_mode. + But expand_builtin_setjmp_setup and expand_builtin_longjmp use Pmode + to save and restore stack pointer. See + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84150 + */ +#define STACK_SAVEAREA_MODE(LEVEL) \ + ((LEVEL) == SAVE_NONLOCAL ? (TARGET_64BIT ? TImode : DImode) : Pmode) + +/* Specify the machine_mode of the size increment + operand of an 'allocate_stack' named pattern. 
*/ +#define STACK_SIZE_MODE Pmode + +/* A C expression whose value is zero if pointers that need to be extended + from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and + greater then zero if they are zero-extended and less then zero if the + ptr_extend instruction should be used. */ + +#define POINTERS_EXTEND_UNSIGNED 1 + +/* A function address in a call instruction + is a byte address (for indexing purposes) + so give the MEM rtx a byte's mode. */ +#define FUNCTION_MODE QImode + + +/* A C expression for the cost of a branch instruction. A value of 1 + is the default; other values are interpreted relative to that. */ + +#define BRANCH_COST(speed_p, predictable_p) \ + (!(speed_p) ? 2 : (predictable_p) ? 0 : ix86_branch_cost) + +/* An integer expression for the size in bits of the largest integer machine + mode that should actually be used. We allow pairs of registers. */ +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode) + +/* Define this macro as a C expression which is nonzero if accessing + less than a word of memory (i.e. a `char' or a `short') is no + faster than accessing a word of memory, i.e., if such access + require more than one instruction or if there is no difference in + cost between byte and (aligned) word loads. + + When this macro is not defined, the compiler will access a field by + finding the smallest containing object; when it is defined, a + fullword load will be used if alignment permits. Unless bytes + accesses are faster than word accesses, using word accesses is + preferable since it may eliminate subsequent memory access if + subsequent accesses occur to other fields in the same word of the + structure, but to different bytes. */ + +#define SLOW_BYTE_ACCESS 0 + +/* Nonzero if access to memory by shorts is slow and undesirable. */ +#define SLOW_SHORT_ACCESS 0 + +/* Define this macro if it is as good or better to call a constant + function address than to call an address kept in a register. 
+ + Desirable on the 386 because a CALL with a constant address is + faster than one with a register address. */ + +#define NO_FUNCTION_CSE 1 + +/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, + return the mode to be used for the comparison. + + For floating-point equality comparisons, CCFPEQmode should be used. + VOIDmode should be used in all other cases. + + For integer comparisons against zero, reduce to CCNOmode or CCZmode if + possible, to allow for more combinations. */ + +#define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y)) + +/* Return nonzero if MODE implies a floating point inequality can be + reversed. */ + +#define REVERSIBLE_CC_MODE(MODE) 1 + +/* A C expression whose value is reversed condition code of the CODE for + comparison done in CC_MODE mode. */ +#define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE)) + + +/* Control the assembler format that we output, to the extent + this does not vary between assemblers. */ + +/* How to refer to registers in assembler output. + This sequence is indexed by compiler's hard-register-number (see above). */ + +/* In order to refer to the first 8 regs as 32-bit regs, prefix an "e". + For non floating point regs, the following are the HImode names. + + For float regs, the stack top is sometimes referred to as "%st(0)" + instead of just "%st". TARGET_PRINT_OPERAND handles this with the + "y" code. 
*/ + +#define HI_REGISTER_NAMES \ +{"ax","dx","cx","bx","si","di","bp","sp", \ + "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \ + "argp", "flags", "fpsr", "frame", \ + "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \ + "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", \ + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \ + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", \ + "xmm16", "xmm17", "xmm18", "xmm19", \ + "xmm20", "xmm21", "xmm22", "xmm23", \ + "xmm24", "xmm25", "xmm26", "xmm27", \ + "xmm28", "xmm29", "xmm30", "xmm31", \ + "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7" } + +#define REGISTER_NAMES HI_REGISTER_NAMES + +#define QI_REGISTER_NAMES \ +{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl"} + +#define QI_HIGH_REGISTER_NAMES \ +{"ah", "dh", "ch", "bh"} + +/* Table of additional register names to use in user input. */ + +#define ADDITIONAL_REGISTER_NAMES \ +{ \ + { "eax", AX_REG }, { "edx", DX_REG }, { "ecx", CX_REG }, { "ebx", BX_REG }, \ + { "esi", SI_REG }, { "edi", DI_REG }, { "ebp", BP_REG }, { "esp", SP_REG }, \ + { "rax", AX_REG }, { "rdx", DX_REG }, { "rcx", CX_REG }, { "rbx", BX_REG }, \ + { "rsi", SI_REG }, { "rdi", DI_REG }, { "rbp", BP_REG }, { "rsp", SP_REG }, \ + { "al", AX_REG }, { "dl", DX_REG }, { "cl", CX_REG }, { "bl", BX_REG }, \ + { "sil", SI_REG }, { "dil", DI_REG }, { "bpl", BP_REG }, { "spl", SP_REG }, \ + { "ah", AX_REG }, { "dh", DX_REG }, { "ch", CX_REG }, { "bh", BX_REG }, \ + { "ymm0", XMM0_REG }, { "ymm1", XMM1_REG }, { "ymm2", XMM2_REG }, { "ymm3", XMM3_REG }, \ + { "ymm4", XMM4_REG }, { "ymm5", XMM5_REG }, { "ymm6", XMM6_REG }, { "ymm7", XMM7_REG }, \ + { "ymm8", XMM8_REG }, { "ymm9", XMM9_REG }, { "ymm10", XMM10_REG }, { "ymm11", XMM11_REG }, \ + { "ymm12", XMM12_REG }, { "ymm13", XMM13_REG }, { "ymm14", XMM14_REG }, { "ymm15", XMM15_REG }, \ + { "ymm16", XMM16_REG }, { "ymm17", XMM17_REG }, { "ymm18", XMM18_REG }, { "ymm19", XMM19_REG }, \ + { "ymm20", 
XMM20_REG }, { "ymm21", XMM21_REG }, { "ymm22", XMM22_REG }, { "ymm23", XMM23_REG }, \ + { "ymm24", XMM24_REG }, { "ymm25", XMM25_REG }, { "ymm26", XMM26_REG }, { "ymm27", XMM27_REG }, \ + { "ymm28", XMM28_REG }, { "ymm29", XMM29_REG }, { "ymm30", XMM30_REG }, { "ymm31", XMM31_REG }, \ + { "zmm0", XMM0_REG }, { "zmm1", XMM1_REG }, { "zmm2", XMM2_REG }, { "zmm3", XMM3_REG }, \ + { "zmm4", XMM4_REG }, { "zmm5", XMM5_REG }, { "zmm6", XMM6_REG }, { "zmm7", XMM7_REG }, \ + { "zmm8", XMM8_REG }, { "zmm9", XMM9_REG }, { "zmm10", XMM10_REG }, { "zmm11", XMM11_REG }, \ + { "zmm12", XMM12_REG }, { "zmm13", XMM13_REG }, { "zmm14", XMM14_REG }, { "zmm15", XMM15_REG }, \ + { "zmm16", XMM16_REG }, { "zmm17", XMM17_REG }, { "zmm18", XMM18_REG }, { "zmm19", XMM19_REG }, \ + { "zmm20", XMM20_REG }, { "zmm21", XMM21_REG }, { "zmm22", XMM22_REG }, { "zmm23", XMM23_REG }, \ + { "zmm24", XMM24_REG }, { "zmm25", XMM25_REG }, { "zmm26", XMM26_REG }, { "zmm27", XMM27_REG }, \ + { "zmm28", XMM28_REG }, { "zmm29", XMM29_REG }, { "zmm30", XMM30_REG }, { "zmm31", XMM31_REG } \ +} + +/* How to renumber registers for dbx and gdb. */ + +#define DBX_REGISTER_NUMBER(N) \ + (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)]) + +extern int const dbx_register_map[FIRST_PSEUDO_REGISTER]; +extern int const dbx64_register_map[FIRST_PSEUDO_REGISTER]; +extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER]; + +/* Before the prologue, RA is at 0(%esp). */ +#define INCOMING_RETURN_ADDR_RTX \ + gen_rtx_MEM (Pmode, stack_pointer_rtx) + +/* After the prologue, RA is at -4(AP) in the current frame. */ +#define RETURN_ADDR_RTX(COUNT, FRAME) \ + ((COUNT) == 0 \ + ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, \ + -UNITS_PER_WORD)) \ + : gen_rtx_MEM (Pmode, plus_constant (Pmode, (FRAME), UNITS_PER_WORD))) + +/* PC is dbx register 8; let's use that column for RA. */ +#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 
16 : 8) + +/* Before the prologue, there are return address and error code for + exception handler on the top of the frame. */ +#define INCOMING_FRAME_SP_OFFSET \ + (cfun->machine->func_type == TYPE_EXCEPTION \ + ? 2 * UNITS_PER_WORD : UNITS_PER_WORD) + +/* The value of INCOMING_FRAME_SP_OFFSET the assembler assumes in + .cfi_startproc. */ +#define DEFAULT_INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD + +/* Describe how we implement __builtin_eh_return. */ +#define EH_RETURN_DATA_REGNO(N) ((N) <= DX_REG ? (N) : INVALID_REGNUM) +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, CX_REG) + + +/* Select a format to encode pointers in exception handling data. CODE + is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is + true if the symbol may be affected by dynamic relocations. + + ??? All x86 object file formats are capable of representing this. + After all, the relocation needed is the same as for the call insn. + Whether or not a particular assembler allows us to enter such, I + guess we'll have to see. */ +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ + asm_preferred_eh_data_format ((CODE), (GLOBAL)) + +/* These are a couple of extensions to the formats accepted + by asm_fprintf: + %z prints out opcode suffix for word-mode instruction + %r prints out word-mode name for reg_names[arg] */ +#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \ + case 'z': \ + fputc (TARGET_64BIT ? 'q' : 'l', (FILE)); \ + break; \ + \ + case 'r': \ + { \ + unsigned int regno = va_arg ((ARGS), int); \ + if (LEGACY_INT_REGNO_P (regno)) \ + fputc (TARGET_64BIT ? 'r' : 'e', (FILE)); \ + fputs (reg_names[regno], (FILE)); \ + break; \ + } + +/* This is how to output an insn to push a register on the stack. */ + +#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \ + asm_fprintf ((FILE), "\tpush%z\t%%%r\n", (REGNO)) + +/* This is how to output an insn to pop a register from the stack. 
*/ + +#define ASM_OUTPUT_REG_POP(FILE, REGNO) \ + asm_fprintf ((FILE), "\tpop%z\t%%%r\n", (REGNO)) + +/* This is how to output an element of a case-vector that is absolute. */ + +#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \ + ix86_output_addr_vec_elt ((FILE), (VALUE)) + +/* This is how to output an element of a case-vector that is relative. */ + +#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \ + ix86_output_addr_diff_elt ((FILE), (VALUE), (REL)) + +/* When we see %v, we will print the 'v' prefix if TARGET_AVX is true. */ + +#define ASM_OUTPUT_AVX_PREFIX(STREAM, PTR) \ +{ \ + if ((PTR)[0] == '%' && (PTR)[1] == 'v') \ + (PTR) += TARGET_AVX ? 1 : 2; \ +} + +/* A C statement or statements which output an assembler instruction + opcode to the stdio stream STREAM. The macro-operand PTR is a + variable of type `char *' which points to the opcode name in + its "internal" form--the form that is written in the machine + description. */ + +#define ASM_OUTPUT_OPCODE(STREAM, PTR) \ + ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR)) + +/* A C statement to output to the stdio stream FILE an assembler + command to pad the location counter to a multiple of 1<<LOG + bytes if it is within MAX_SKIP bytes. */ + +#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN +#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \ + do { \ + if ((LOG) != 0) \ + { \ + if ((MAX_SKIP) == 0 || (MAX_SKIP) >= (1 << (LOG)) - 1) \ + fprintf ((FILE), "\t.p2align %d\n", (LOG)); \ + else \ + fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \ + } \ + } while (0) +#endif + +/* Write the extra assembler code needed to declare a function + properly. */ + +#undef ASM_OUTPUT_FUNCTION_LABEL +#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \ + ix86_asm_output_function_label ((FILE), (NAME), (DECL)) + +/* A C statement (sans semicolon) to output a reference to SYMBOL_REF SYM. + If not defined, assemble_name will be used to output the name of the + symbol. This macro may be used to modify the way a symbol is referenced + depending on information encoded by TARGET_ENCODE_SECTION_INFO.
*/ + +#ifndef ASM_OUTPUT_SYMBOL_REF +#define ASM_OUTPUT_SYMBOL_REF(FILE, SYM) \ + do { \ + const char *name \ + = assemble_name_resolve (XSTR (x, 0)); \ + /* In -masm=att wrap identifiers that start with $ \ + into parens. */ \ + if (ASSEMBLER_DIALECT == ASM_ATT \ + && name[0] == '$' \ + && user_label_prefix[0] == '\0') \ + { \ + fputc ('(', (FILE)); \ + assemble_name_raw ((FILE), name); \ + fputc (')', (FILE)); \ + } \ + else \ + assemble_name_raw ((FILE), name); \ + } while (0) +#endif + +/* Under some conditions we need jump tables in the text section, + because the assembler cannot handle label differences between + sections. */ + +#define JUMP_TABLES_IN_TEXT_SECTION \ + (flag_pic && !(TARGET_64BIT || HAVE_AS_GOTOFF_IN_DATA)) + +/* Switch to init or fini section via SECTION_OP, emit a call to FUNC, + and switch back. For x86 we do this only to save a few bytes that + would otherwise be unused in the text section. */ +#define CRT_MKSTR2(VAL) #VAL +#define CRT_MKSTR(x) CRT_MKSTR2(x) + +#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ + asm (SECTION_OP "\n\t" \ + "call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n" \ + TEXT_SECTION_ASM_OP); + +/* Default threshold for putting data in large sections + with x86-64 medium memory model */ +#define DEFAULT_LARGE_SECTION_THRESHOLD 65536 + +/* Which processor to tune code generation for. These must be in sync + with processor_target_table in i386.cc. 
*/ + +enum processor_type +{ + PROCESSOR_GENERIC = 0, + PROCESSOR_I386, /* 80386 */ + PROCESSOR_I486, /* 80486DX, 80486SX, 80486DX[24] */ + PROCESSOR_PENTIUM, + PROCESSOR_LAKEMONT, + PROCESSOR_PENTIUMPRO, + PROCESSOR_PENTIUM4, + PROCESSOR_NOCONA, + PROCESSOR_CORE2, + PROCESSOR_NEHALEM, + PROCESSOR_SANDYBRIDGE, + PROCESSOR_HASWELL, + PROCESSOR_BONNELL, + PROCESSOR_SILVERMONT, + PROCESSOR_GOLDMONT, + PROCESSOR_GOLDMONT_PLUS, + PROCESSOR_TREMONT, + PROCESSOR_KNL, + PROCESSOR_KNM, + PROCESSOR_SKYLAKE, + PROCESSOR_SKYLAKE_AVX512, + PROCESSOR_CANNONLAKE, + PROCESSOR_ICELAKE_CLIENT, + PROCESSOR_ICELAKE_SERVER, + PROCESSOR_CASCADELAKE, + PROCESSOR_TIGERLAKE, + PROCESSOR_COOPERLAKE, + PROCESSOR_SAPPHIRERAPIDS, + PROCESSOR_ALDERLAKE, + PROCESSOR_ROCKETLAKE, + PROCESSOR_INTEL, + PROCESSOR_GEODE, + PROCESSOR_K6, + PROCESSOR_ATHLON, + PROCESSOR_K8, + PROCESSOR_AMDFAM10, + PROCESSOR_BDVER1, + PROCESSOR_BDVER2, + PROCESSOR_BDVER3, + PROCESSOR_BDVER4, + PROCESSOR_BTVER1, + PROCESSOR_BTVER2, + PROCESSOR_ZNVER1, + PROCESSOR_ZNVER2, + PROCESSOR_ZNVER3, + PROCESSOR_max +}; + +#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS) +extern const char *const processor_names[]; + +#include "wide-int-bitmask.h" + +enum pta_flag +{ +#define DEF_PTA(NAME) _ ## NAME, +#include "i386-isa.def" +#undef DEF_PTA + END_PTA +}; + +/* wide_int_bitmask can handle only 128 flags. */ +STATIC_ASSERT (END_PTA <= 128); + +#define WIDE_INT_BITMASK_FROM_NTH(N) (N < 64 ? 
wide_int_bitmask (0, 1ULL << N) \ + : wide_int_bitmask (1ULL << (N - 64), 0)) + +#define DEF_PTA(NAME) constexpr wide_int_bitmask PTA_ ## NAME \ + = WIDE_INT_BITMASK_FROM_NTH ((pta_flag) _ ## NAME); +#include "i386-isa.def" +#undef DEF_PTA + +constexpr wide_int_bitmask PTA_X86_64_BASELINE = PTA_64BIT | PTA_MMX | PTA_SSE + | PTA_SSE2 | PTA_NO_SAHF | PTA_FXSR; +constexpr wide_int_bitmask PTA_X86_64_V2 = (PTA_X86_64_BASELINE + & (~PTA_NO_SAHF)) + | PTA_CX16 | PTA_POPCNT | PTA_SSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_SSSE3; +constexpr wide_int_bitmask PTA_X86_64_V3 = PTA_X86_64_V2 + | PTA_AVX | PTA_AVX2 | PTA_BMI | PTA_BMI2 | PTA_F16C | PTA_FMA | PTA_LZCNT + | PTA_MOVBE | PTA_XSAVE; +constexpr wide_int_bitmask PTA_X86_64_V4 = PTA_X86_64_V3 + | PTA_AVX512F | PTA_AVX512BW | PTA_AVX512CD | PTA_AVX512DQ | PTA_AVX512VL; + +constexpr wide_int_bitmask PTA_CORE2 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 + | PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_FXSR; +constexpr wide_int_bitmask PTA_NEHALEM = PTA_CORE2 | PTA_SSE4_1 | PTA_SSE4_2 + | PTA_POPCNT; +constexpr wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_PCLMUL; +constexpr wide_int_bitmask PTA_SANDYBRIDGE = PTA_WESTMERE | PTA_AVX | PTA_XSAVE + | PTA_XSAVEOPT; +constexpr wide_int_bitmask PTA_IVYBRIDGE = PTA_SANDYBRIDGE | PTA_FSGSBASE + | PTA_RDRND | PTA_F16C; +constexpr wide_int_bitmask PTA_HASWELL = PTA_IVYBRIDGE | PTA_AVX2 | PTA_BMI + | PTA_BMI2 | PTA_LZCNT | PTA_FMA | PTA_MOVBE | PTA_HLE; +constexpr wide_int_bitmask PTA_BROADWELL = PTA_HASWELL | PTA_ADX | PTA_RDSEED + | PTA_PRFCHW; +constexpr wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_AES + | PTA_CLFLUSHOPT | PTA_XSAVEC | PTA_XSAVES | PTA_SGX; +constexpr wide_int_bitmask PTA_SKYLAKE_AVX512 = PTA_SKYLAKE | PTA_AVX512F + | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU + | PTA_CLWB; +constexpr wide_int_bitmask PTA_CASCADELAKE = PTA_SKYLAKE_AVX512 + | PTA_AVX512VNNI; +constexpr wide_int_bitmask PTA_COOPERLAKE = PTA_CASCADELAKE | PTA_AVX512BF16; 
+constexpr wide_int_bitmask PTA_CANNONLAKE = PTA_SKYLAKE | PTA_AVX512F + | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU + | PTA_AVX512VBMI | PTA_AVX512IFMA | PTA_SHA; +constexpr wide_int_bitmask PTA_ICELAKE_CLIENT = PTA_CANNONLAKE | PTA_AVX512VNNI + | PTA_GFNI | PTA_VAES | PTA_AVX512VBMI2 | PTA_VPCLMULQDQ | PTA_AVX512BITALG + | PTA_RDPID | PTA_AVX512VPOPCNTDQ; +constexpr wide_int_bitmask PTA_ROCKETLAKE = PTA_ICELAKE_CLIENT & ~PTA_SGX; +constexpr wide_int_bitmask PTA_ICELAKE_SERVER = PTA_ICELAKE_CLIENT + | PTA_PCONFIG | PTA_WBNOINVD | PTA_CLWB; +constexpr wide_int_bitmask PTA_TIGERLAKE = PTA_ICELAKE_CLIENT | PTA_MOVDIRI + | PTA_MOVDIR64B | PTA_CLWB | PTA_AVX512VP2INTERSECT | PTA_KL | PTA_WIDEKL; +constexpr wide_int_bitmask PTA_SAPPHIRERAPIDS = PTA_ICELAKE_SERVER | PTA_MOVDIRI + | PTA_MOVDIR64B | PTA_AVX512VP2INTERSECT | PTA_ENQCMD | PTA_CLDEMOTE + | PTA_PTWRITE | PTA_WAITPKG | PTA_SERIALIZE | PTA_TSXLDTRK | PTA_AMX_TILE + | PTA_AMX_INT8 | PTA_AMX_BF16 | PTA_UINTR | PTA_AVXVNNI | PTA_AVX512FP16 + | PTA_AVX512BF16; +constexpr wide_int_bitmask PTA_KNL = PTA_BROADWELL | PTA_AVX512PF + | PTA_AVX512ER | PTA_AVX512F | PTA_AVX512CD | PTA_PREFETCHWT1; +constexpr wide_int_bitmask PTA_BONNELL = PTA_CORE2 | PTA_MOVBE; +constexpr wide_int_bitmask PTA_SILVERMONT = PTA_WESTMERE | PTA_MOVBE + | PTA_RDRND | PTA_PRFCHW; +constexpr wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_AES | PTA_SHA + | PTA_XSAVE | PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT + | PTA_XSAVEOPT | PTA_FSGSBASE; +constexpr wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID + | PTA_SGX | PTA_PTWRITE; +constexpr wide_int_bitmask PTA_TREMONT = PTA_GOLDMONT_PLUS | PTA_CLWB + | PTA_GFNI | PTA_MOVDIRI | PTA_MOVDIR64B | PTA_CLDEMOTE | PTA_WAITPKG; +constexpr wide_int_bitmask PTA_ALDERLAKE = PTA_TREMONT | PTA_ADX | PTA_AVX + | PTA_AVX2 | PTA_BMI | PTA_BMI2 | PTA_F16C | PTA_FMA | PTA_LZCNT + | PTA_PCONFIG | PTA_PKU | PTA_VAES | PTA_VPCLMULQDQ | PTA_SERIALIZE + | PTA_HRESET 
| PTA_KL | PTA_WIDEKL | PTA_AVXVNNI; +constexpr wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW + | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ; + +#ifndef GENERATOR_FILE + +#include "insn-attr-common.h" + +#include "common/config/i386/i386-cpuinfo.h" + +class pta +{ +public: + const char *const name; /* processor name or nickname. */ + const enum processor_type processor; + const enum attr_cpu schedule; + const wide_int_bitmask flags; + const int model; + const enum feature_priority priority; +}; + +extern const pta processor_alias_table[]; +extern unsigned int const pta_size; +extern unsigned int const num_arch_names; +#endif + +#endif + +extern enum processor_type ix86_tune; +extern enum processor_type ix86_arch; + +/* Size of the RED_ZONE area. */ +#define RED_ZONE_SIZE 128 +/* Reserved area of the red zone for temporaries. */ +#define RED_ZONE_RESERVE 8 + +extern unsigned int ix86_preferred_stack_boundary; +extern unsigned int ix86_incoming_stack_boundary; + +/* Smallest class containing REGNO. */ +extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER]; + +enum ix86_fpcmp_strategy { + IX86_FPCMP_SAHF, + IX86_FPCMP_COMI, + IX86_FPCMP_ARITH +}; + +/* To properly truncate FP values into integers, we need to set i387 control + word. We can't emit proper mode switching code before reload, as spills + generated by reload may truncate values incorrectly, but we still can avoid + redundant computation of new control word by the mode switching pass. + The fldcw instructions are still emitted redundantly, but this is probably + not going to be noticeable problem, as most CPUs do have fast path for + the sequence. + + The machinery is to emit simple truncation instructions and split them + before reload to instructions having USEs of two memory locations that + are filled by this code to old and new control word. + + Post-reload pass may be later used to eliminate the redundant fildcw if + needed. 
*/ + +enum ix86_stack_slot +{ + SLOT_TEMP = 0, + SLOT_CW_STORED, + SLOT_CW_ROUNDEVEN, + SLOT_CW_TRUNC, + SLOT_CW_FLOOR, + SLOT_CW_CEIL, + SLOT_STV_TEMP, + SLOT_FLOATxFDI_387, + MAX_386_STACK_LOCALS +}; + +enum ix86_entity +{ + X86_DIRFLAG = 0, + AVX_U128, + I387_ROUNDEVEN, + I387_TRUNC, + I387_FLOOR, + I387_CEIL, + MAX_386_ENTITIES +}; + +enum x86_dirflag_state +{ + X86_DIRFLAG_RESET, + X86_DIRFLAG_ANY +}; + +enum avx_u128_state +{ + AVX_U128_CLEAN, + AVX_U128_DIRTY, + AVX_U128_ANY +}; + +/* Define this macro if the port needs extra instructions inserted + for mode switching in an optimizing compilation. */ + +#define OPTIMIZE_MODE_SWITCHING(ENTITY) \ + ix86_optimize_mode_switching[(ENTITY)] + +/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as + initializer for an array of integers. Each initializer element N + refers to an entity that needs mode switching, and specifies the + number of different modes that might need to be set for this + entity. The position of the initializer in the initializer - + starting counting at zero - determines the integer that is used to + refer to the mode-switched entity in question. */ + +#define NUM_MODES_FOR_MODE_SWITCHING \ + { X86_DIRFLAG_ANY, AVX_U128_ANY, \ + I387_CW_ANY, I387_CW_ANY, I387_CW_ANY, I387_CW_ANY } + + +/* Avoid renaming of stack registers, as doing so in combination with + scheduling just increases amount of live registers at time and in + the turn amount of fxch instructions needed. + + ??? Maybe Pentium chips benefits from renaming, someone can try.... + + Don't rename evex to non-evex sse registers. */ + +#define HARD_REGNO_RENAME_OK(SRC, TARGET) \ + (!STACK_REGNO_P (SRC) \ + && EXT_REX_SSE_REGNO_P (SRC) == EXT_REX_SSE_REGNO_P (TARGET)) + + +#define FASTCALL_PREFIX '@' + +#ifndef USED_FOR_TARGET +/* Structure describing stack frame layout. 
+ Stack grows downward: + + [arguments] + <- ARG_POINTER + saved pc + + saved static chain if ix86_static_chain_on_stack + + saved frame pointer if frame_pointer_needed + <- HARD_FRAME_POINTER + [saved regs] + <- reg_save_offset + [padding0] + <- stack_realign_offset + [saved SSE regs] + OR + [stub-saved registers for ms x64 --> sysv clobbers + <- Start of out-of-line, stub-saved/restored regs + (see libgcc/config/i386/(sav|res)ms64*.S) + [XMM6-15] + [RSI] + [RDI] + [?RBX] only if RBX is clobbered + [?RBP] only if RBP and RBX are clobbered + [?R12] only if R12 and all previous regs are clobbered + [?R13] only if R13 and all previous regs are clobbered + [?R14] only if R14 and all previous regs are clobbered + [?R15] only if R15 and all previous regs are clobbered + <- end of stub-saved/restored regs + [padding1] + ] + <- sse_reg_save_offset + [padding2] + | <- FRAME_POINTER + [va_arg registers] | + | + [frame] | + | + [padding2] | = to_allocate + <- STACK_POINTER + */ +struct GTY(()) ix86_frame +{ + int nsseregs; + int nregs; + int va_arg_size; + int red_zone_size; + int outgoing_arguments_size; + + /* The offsets relative to ARG_POINTER. */ + HOST_WIDE_INT frame_pointer_offset; + HOST_WIDE_INT hard_frame_pointer_offset; + HOST_WIDE_INT stack_pointer_offset; + HOST_WIDE_INT hfp_save_offset; + HOST_WIDE_INT reg_save_offset; + HOST_WIDE_INT stack_realign_allocate; + HOST_WIDE_INT stack_realign_offset; + HOST_WIDE_INT sse_reg_save_offset; + + /* When save_regs_using_mov is set, emit prologue using + move instead of push instructions. */ + bool save_regs_using_mov; + + /* Assume without checking that: + EXPENSIVE_P = expensive_function_p (EXPENSIVE_COUNT). */ + bool expensive_p; + int expensive_count; +}; + +/* Machine specific frame tracking during prologue/epilogue generation. All + values are positive, but since the x86 stack grows downward, are subtratced + from the CFA to produce a valid address. 
*/ + +struct GTY(()) machine_frame_state +{ + /* This pair tracks the currently active CFA as reg+offset. When reg + is drap_reg, we don't bother trying to record here the real CFA when + it might really be a DW_CFA_def_cfa_expression. */ + rtx cfa_reg; + HOST_WIDE_INT cfa_offset; + + /* The current offset (canonically from the CFA) of ESP and EBP. + When stack frame re-alignment is active, these may not be relative + to the CFA. However, in all cases they are relative to the offsets + of the saved registers stored in ix86_frame. */ + HOST_WIDE_INT sp_offset; + HOST_WIDE_INT fp_offset; + + /* The size of the red-zone that may be assumed for the purposes of + eliding register restore notes in the epilogue. This may be zero + if no red-zone is in effect, or may be reduced from the real + red-zone value by a maximum runtime stack re-alignment value. */ + int red_zone_offset; + + /* Indicate whether each of ESP, EBP or DRAP currently holds a valid + value within the frame. If false then the offset above should be + ignored. Note that DRAP, if valid, *always* points to the CFA and + thus has an offset of zero. */ + BOOL_BITFIELD sp_valid : 1; + BOOL_BITFIELD fp_valid : 1; + BOOL_BITFIELD drap_valid : 1; + + /* Indicate whether the local stack frame has been re-aligned. When + set, the SP/FP offsets above are relative to the aligned frame + and not the CFA. */ + BOOL_BITFIELD realigned : 1; + + /* Indicates whether the stack pointer has been re-aligned. When set, + SP/FP continue to be relative to the CFA, but the stack pointer + should only be used for offsets > sp_realigned_offset, while + the frame pointer should be used for offsets <= sp_realigned_fp_last. + The flags realigned and sp_realigned are mutually exclusive. */ + BOOL_BITFIELD sp_realigned : 1; + + /* If sp_realigned is set, this is the last valid offset from the CFA + that can be used for access with the frame pointer. 
*/ + HOST_WIDE_INT sp_realigned_fp_last; + + /* If sp_realigned is set, this is the offset from the CFA that the stack + pointer was realigned, and may or may not be equal to sp_realigned_fp_last. + Access via the stack pointer is only valid for offsets that are greater than + this value. */ + HOST_WIDE_INT sp_realigned_offset; +}; + +/* Private to winnt.cc. */ +struct seh_frame_state; + +enum function_type +{ + TYPE_UNKNOWN = 0, + TYPE_NORMAL, + /* The current function is an interrupt service routine with a + pointer argument as specified by the "interrupt" attribute. */ + TYPE_INTERRUPT, + /* The current function is an interrupt service routine with a + pointer argument and an integer argument as specified by the + "interrupt" attribute. */ + TYPE_EXCEPTION +}; + +enum queued_insn_type +{ + TYPE_NONE = 0, + TYPE_ENDBR, + TYPE_PATCHABLE_AREA +}; + +struct GTY(()) machine_function { + struct stack_local_entry *stack_locals; + int varargs_gpr_size; + int varargs_fpr_size; + int optimize_mode_switching[MAX_386_ENTITIES]; + + /* Cached initial frame layout for the current function. */ + struct ix86_frame frame; + + /* For -fsplit-stack support: A stack local which holds a pointer to + the stack arguments for a function with a variable number of + arguments. This is set at the start of the function and is used + to initialize the overflow_arg_area field of the va_list + structure. */ + rtx split_stack_varargs_pointer; + + /* This value is used for amd64 targets and specifies the current abi + to be used. MS_ABI means ms abi. Otherwise SYSV_ABI means sysv abi. */ + ENUM_BITFIELD(calling_abi) call_abi : 8; + + /* Nonzero if the function accesses a previous frame. */ + BOOL_BITFIELD accesses_prev_frame : 1; + + /* Set by ix86_compute_frame_layout and used by prologue/epilogue + expander to determine the style used. */ + BOOL_BITFIELD use_fast_prologue_epilogue : 1; + + /* Nonzero if the current function calls pc thunk and + must not use the red zone. 
*/ + BOOL_BITFIELD pc_thunk_call_expanded : 1; + + /* If true, the current function needs the default PIC register, not + an alternate register (on x86) and must not use the red zone (on + x86_64), even if it's a leaf function. We don't want the + function to be regarded as non-leaf because TLS calls need not + affect register allocation. This flag is set when a TLS call + instruction is expanded within a function, and never reset, even + if all such instructions are optimized away. Use the + ix86_current_function_calls_tls_descriptor macro for a better + approximation. */ + BOOL_BITFIELD tls_descriptor_call_expanded_p : 1; + + /* If true, the current function has a STATIC_CHAIN is placed on the + stack below the return address. */ + BOOL_BITFIELD static_chain_on_stack : 1; + + /* If true, it is safe to not save/restore DRAP register. */ + BOOL_BITFIELD no_drap_save_restore : 1; + + /* Function type. */ + ENUM_BITFIELD(function_type) func_type : 2; + + /* How to generate indirec branch. */ + ENUM_BITFIELD(indirect_branch) indirect_branch_type : 3; + + /* If true, the current function has local indirect jumps, like + "indirect_jump" or "tablejump". */ + BOOL_BITFIELD has_local_indirect_jump : 1; + + /* How to generate function return. */ + ENUM_BITFIELD(indirect_branch) function_return_type : 3; + + /* If true, the current function is a function specified with + the "interrupt" or "no_caller_saved_registers" attribute. */ + BOOL_BITFIELD no_caller_saved_registers : 1; + + /* If true, there is register available for argument passing. This + is used only in ix86_function_ok_for_sibcall by 32-bit to determine + if there is scratch register available for indirect sibcall. In + 64-bit, rax, r10 and r11 are scratch registers which aren't used to + pass arguments and can be used for indirect sibcall. */ + BOOL_BITFIELD arg_reg_available : 1; + + /* If true, we're out-of-lining reg save/restore for regs clobbered + by 64-bit ms_abi functions calling a sysv_abi function. 
*/ + BOOL_BITFIELD call_ms2sysv : 1; + + /* If true, the incoming 16-byte aligned stack has an offset (of 8) and + needs padding prior to out-of-line stub save/restore area. */ + BOOL_BITFIELD call_ms2sysv_pad_in : 1; + + /* This is the number of extra registers saved by stub (valid range is + 0-6). Each additional register is only saved/restored by the stubs + if all successive ones are. (Will always be zero when using a hard + frame pointer.) */ + unsigned int call_ms2sysv_extra_regs:3; + + /* Nonzero if the function places outgoing arguments on stack. */ + BOOL_BITFIELD outgoing_args_on_stack : 1; + + /* If true, ENDBR or patchable area is queued at function entrance. */ + ENUM_BITFIELD(queued_insn_type) insn_queued_at_entrance : 2; + + /* If true, the function label has been emitted. */ + BOOL_BITFIELD function_label_emitted : 1; + + /* True if the function needs a stack frame. */ + BOOL_BITFIELD stack_frame_required : 1; + + /* True if we should act silently, rather than raise an error for + invalid calls. */ + BOOL_BITFIELD silent_p : 1; + + /* True if red zone is used. */ + BOOL_BITFIELD red_zone_used : 1; + + /* The largest alignment, in bytes, of stack slot actually used. */ + unsigned int max_used_stack_alignment; + + /* During prologue/epilogue generation, the current frame state. + Otherwise, the frame state at the end of the prologue. */ + struct machine_frame_state fs; + + /* During SEH output, this is non-null. 
*/ + struct seh_frame_state * GTY((skip(""))) seh; +}; + +extern GTY(()) tree sysv_va_list_type_node; +extern GTY(()) tree ms_va_list_type_node; +#endif + +#define ix86_stack_locals (cfun->machine->stack_locals) +#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size) +#define ix86_varargs_fpr_size (cfun->machine->varargs_fpr_size) +#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching) +#define ix86_pc_thunk_call_expanded (cfun->machine->pc_thunk_call_expanded) +#define ix86_tls_descriptor_calls_expanded_in_cfun \ + (cfun->machine->tls_descriptor_call_expanded_p) +/* Since tls_descriptor_call_expanded is not cleared, even if all TLS + calls are optimized away, we try to detect cases in which it was + optimized away. Since such instructions (use (reg REG_SP)), we can + verify whether there's any such instruction live by testing that + REG_SP is live. */ +#define ix86_current_function_calls_tls_descriptor \ + (ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG)) +#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack) +#define ix86_red_zone_used (cfun->machine->red_zone_used) + +/* Control behavior of x86_file_start. */ +#define X86_FILE_START_VERSION_DIRECTIVE false +#define X86_FILE_START_FLTUSED false + +/* Flag to mark data that is in the large address area. */ +#define SYMBOL_FLAG_FAR_ADDR (SYMBOL_FLAG_MACH_DEP << 0) +#define SYMBOL_REF_FAR_ADDR_P(X) \ + ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0) + +/* Flags to mark dllimport/dllexport. Used by PE ports, but handy to + have defined always, to avoid ifdefing. 
*/ +#define SYMBOL_FLAG_DLLIMPORT (SYMBOL_FLAG_MACH_DEP << 1) +#define SYMBOL_REF_DLLIMPORT_P(X) \ + ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLIMPORT) != 0) + +#define SYMBOL_FLAG_DLLEXPORT (SYMBOL_FLAG_MACH_DEP << 2) +#define SYMBOL_REF_DLLEXPORT_P(X) \ + ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0) + +#define SYMBOL_FLAG_STUBVAR (SYMBOL_FLAG_MACH_DEP << 4) +#define SYMBOL_REF_STUBVAR_P(X) \ + ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_STUBVAR) != 0) + +extern void debug_ready_dispatch (void); +extern void debug_dispatch_window (int); + +/* The value at zero is only defined for the BMI instructions + LZCNT and TZCNT, not the BSR/BSF insns in the original isa. */ +#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ + ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_BMI ? 2 : 0) +#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ + ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_LZCNT ? 2 : 0) + + +/* Flags returned by ix86_get_callcvt (). */ +#define IX86_CALLCVT_CDECL 0x1 +#define IX86_CALLCVT_STDCALL 0x2 +#define IX86_CALLCVT_FASTCALL 0x4 +#define IX86_CALLCVT_THISCALL 0x8 +#define IX86_CALLCVT_REGPARM 0x10 +#define IX86_CALLCVT_SSEREGPARM 0x20 + +#define IX86_BASE_CALLCVT(FLAGS) \ + ((FLAGS) & (IX86_CALLCVT_CDECL | IX86_CALLCVT_STDCALL \ + | IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL)) + +#define RECIP_MASK_NONE 0x00 +#define RECIP_MASK_DIV 0x01 +#define RECIP_MASK_SQRT 0x02 +#define RECIP_MASK_VEC_DIV 0x04 +#define RECIP_MASK_VEC_SQRT 0x08 +#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \ + | RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT) +#define RECIP_MASK_DEFAULT (RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT) + +#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0) +#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0) +#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0) +#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0) + +/* Use 128-bit AVX instructions in the auto-vectorizer. 
*/ +#define TARGET_PREFER_AVX128 (prefer_vector_width_type == PVW_AVX128) +/* Use 256-bit AVX instructions in the auto-vectorizer. */ +#define TARGET_PREFER_AVX256 (TARGET_PREFER_AVX128 \ + || prefer_vector_width_type == PVW_AVX256) + +#define TARGET_INDIRECT_BRANCH_REGISTER \ + (ix86_indirect_branch_register \ + || cfun->machine->indirect_branch_type != indirect_branch_keep) + +#define IX86_HLE_ACQUIRE (1 << 16) +#define IX86_HLE_RELEASE (1 << 17) + +/* For switching between functions with different target attributes. */ +#define SWITCHABLE_TARGET 1 + +#define TARGET_SUPPORTS_WIDE_INT 1 + +#if !defined(GENERATOR_FILE) && !defined(IN_LIBGCC2) +extern enum attr_cpu ix86_schedule; + +#define NUM_X86_64_MS_CLOBBERED_REGS 12 +#endif + +/* __builtin_eh_return can't handle stack realignment, so disable MMX/SSE + in 32-bit libgcc functions that call it. */ +#ifndef __x86_64__ +#define LIBGCC2_UNWIND_ATTRIBUTE __attribute__((target ("no-mmx,no-sse"))) +#endif + +/* +Local variables: +version-control: t +End: +*/ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/linux-common.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/linux-common.h new file mode 100644 index 0000000..efa7fb2 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/linux-common.h @@ -0,0 +1,71 @@ +/* Definitions for Intel 386 running Linux-based GNU systems with ELF format. + Copyright (C) 2012-2022 Free Software Foundation, Inc. + Contributed by Ilya Enkovich. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + GNU_USER_TARGET_OS_CPP_BUILTINS(); \ + ANDROID_TARGET_OS_CPP_BUILTINS(); \ + } \ + while (0) + +#define EXTRA_TARGET_D_OS_VERSIONS() \ + ANDROID_TARGET_D_OS_VERSIONS(); + +#undef CC1_SPEC +#define CC1_SPEC \ + LINUX_OR_ANDROID_CC (GNU_USER_TARGET_CC1_SPEC, \ + GNU_USER_TARGET_CC1_SPEC " " ANDROID_CC1_SPEC) + +#undef LINK_SPEC +#define LINK_SPEC \ + LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LINK_SPEC, \ + GNU_USER_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) + +#undef LIB_SPEC +#define LIB_SPEC \ + LINUX_OR_ANDROID_LD (GNU_USER_TARGET_LIB_SPEC, \ + GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC " " ANDROID_LIB_SPEC) + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC \ + LINUX_OR_ANDROID_LD (GNU_USER_TARGET_STARTFILE_SPEC, \ + ANDROID_STARTFILE_SPEC) + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC \ + LINUX_OR_ANDROID_LD (GNU_USER_TARGET_MATHFILE_SPEC " " \ + GNU_USER_TARGET_ENDFILE_SPEC, \ + GNU_USER_TARGET_MATHFILE_SPEC " " \ + ANDROID_ENDFILE_SPEC) + +#ifdef HAVE_LD_PUSHPOPSTATE_SUPPORT +#define MPX_LD_AS_NEEDED_GUARD_PUSH "--push-state --no-as-needed" +#define MPX_LD_AS_NEEDED_GUARD_POP "--pop-state" +#else +#define MPX_LD_AS_NEEDED_GUARD_PUSH "" +#define MPX_LD_AS_NEEDED_GUARD_POP "" +#endif + +extern void file_end_indicate_exec_stack_and_gnu_property (void); + +#undef TARGET_ASM_FILE_END +#define TARGET_ASM_FILE_END file_end_indicate_exec_stack_and_gnu_property diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/linux64.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/linux64.h new file mode 100644 index 0000000..8681e36 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/linux64.h @@ -0,0 +1,39 @@ +/* Definitions for AMD x86-64 running Linux-based GNU systems with ELF format. 
+ Copyright (C) 2001-2022 Free Software Foundation, Inc. + Contributed by Jan Hubicka , based on linux.h. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#define GNU_USER_LINK_EMULATION32 "elf_i386" +#define GNU_USER_LINK_EMULATION64 "elf_x86_64" +#define GNU_USER_LINK_EMULATIONX32 "elf32_x86_64" + +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux-x86-64.so.2" +#define GLIBC_DYNAMIC_LINKERX32 "/libx32/ld-linux-x32.so.2" + +#undef MUSL_DYNAMIC_LINKER32 +#define MUSL_DYNAMIC_LINKER32 "/lib/ld-musl-i386.so.1" +#undef MUSL_DYNAMIC_LINKER64 +#define MUSL_DYNAMIC_LINKER64 "/lib/ld-musl-x86_64.so.1" +#undef MUSL_DYNAMIC_LINKERX32 +#define MUSL_DYNAMIC_LINKERX32 "/lib/ld-musl-x32.so.1" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/stringop.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/stringop.def new file mode 100644 index 0000000..e35140e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/stringop.def @@ -0,0 +1,28 @@ +/* Definitions for stringop strategy for IA-32. + Copyright (C) 2013-2022 Free Software Foundation, Inc. 
+ +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the files COPYING3. If not, +see . */ + +DEF_ALG (no_stringop, no_stringop) +DEF_ALG (libcall, libcall) +DEF_ALG (rep_prefix_1_byte, rep_byte) +DEF_ALG (rep_prefix_4_byte, rep_4byte) +DEF_ALG (rep_prefix_8_byte, rep_8byte) +DEF_ALG (loop_1_byte, byte_loop) +DEF_ALG (loop, loop) +DEF_ALG (unrolled_loop, unrolled_loop) +DEF_ALG (vector_loop, vector_loop) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/unix.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/unix.h new file mode 100644 index 0000000..582d65b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/unix.h @@ -0,0 +1,80 @@ +/* Definitions for Unix assembler syntax for the Intel 80386. + Copyright (C) 1988-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* This file defines the aspects of assembler syntax + that are the same for all the i386 Unix systems + (though they may differ in non-Unix systems). */ + +/* Define macro used to output shift-double opcodes when the shift + count is in %cl. Some assemblers require %cl as an argument; + some don't. This macro controls what to do: by default, don't + print %cl. */ +#define SHIFT_DOUBLE_OMITS_COUNT 1 + +/* Define the syntax of pseudo-ops, labels and comments. */ + +/* String containing the assembler's comment-starter. + Note the trailing space is necessary in case the character + that immediately follows the comment is '*'. If this happens + and the space is not there the assembler will interpret this + as the start of a C-like slash-star comment and complain when + there is no terminator. */ + +#define ASM_COMMENT_START "/ " + +/* Output to assembler file text saying following lines + may contain character constants, extra white space, comments, etc. */ + +#define ASM_APP_ON "/APP\n" + +/* Output to assembler file text saying following lines + no longer contain unusual constructs. */ + +#define ASM_APP_OFF "/NO_APP\n" + +/* Output before read-only data. */ + +#define TEXT_SECTION_ASM_OP "\t.text" + +/* Output before writable (initialized) data. */ + +#define DATA_SECTION_ASM_OP "\t.data" + +/* Output before writable (uninitialized) data. */ + +#define BSS_SECTION_ASM_OP "\t.bss" + +/* Globalizing directive for a label. */ +#define GLOBAL_ASM_OP "\t.globl\t" + +/* By default, target has a 80387, uses IEEE compatible arithmetic, + and returns float values in the 387. 
*/ +#undef TARGET_SUBTARGET_DEFAULT +#define TARGET_SUBTARGET_DEFAULT \ + (MASK_80387 | MASK_IEEE_FP | MASK_FLOAT_RETURNS) + +/* By default, 64-bit mode uses 128-bit long double. */ +#undef TARGET_SUBTARGET64_DEFAULT +#define TARGET_SUBTARGET64_DEFAULT \ + MASK_128BIT_LONG_DOUBLE diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/x86-64.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/x86-64.h new file mode 100644 index 0000000..0652699 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/x86-64.h @@ -0,0 +1,91 @@ +/* OS independent definitions for AMD x86-64. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + Contributed by Bo Thorsen . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#undef ASM_COMMENT_START +#define ASM_COMMENT_START "#" + +#undef DBX_REGISTER_NUMBER +#define DBX_REGISTER_NUMBER(n) \ + (TARGET_64BIT ? dbx64_register_map[n] : svr4_dbx_register_map[n]) + +/* Output assembler code to FILE to call the profiler. */ +#define NO_PROFILE_COUNTERS 1 + +#undef MCOUNT_NAME +#define MCOUNT_NAME "mcount" + +#undef SIZE_TYPE +#define SIZE_TYPE (TARGET_LP64 ? 
"long unsigned int" : "unsigned int") + +#undef PTRDIFF_TYPE +#define PTRDIFF_TYPE (TARGET_LP64 ? "long int" : "int") + +#undef WCHAR_TYPE +#define WCHAR_TYPE "int" + +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + +#undef ASM_SPEC +#define ASM_SPEC "%{m16|m32:--32} %{m64:--64} %{mx32:--x32}" + +#undef ASM_OUTPUT_ALIGNED_BSS +#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ + x86_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN) + +#undef ASM_OUTPUT_ALIGNED_DECL_COMMON +#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \ + x86_elf_aligned_decl_common (FILE, DECL, NAME, SIZE, ALIGN); + +#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL +#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \ + do \ + { \ + fprintf ((FILE), "%s", LOCAL_ASM_OP); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), "\n"); \ + ASM_OUTPUT_ALIGNED_DECL_COMMON (FILE, DECL, NAME, SIZE, ALIGN); \ + } \ + while (0) + +#define SUBALIGN_LOG 3 + +/* i386 System V Release 4 uses DWARF debugging info. + x86-64 ABI specifies DWARF2. */ + +#define DWARF2_DEBUGGING_INFO 1 +#define DWARF2_UNWIND_INFO 1 + +#undef PREFERRED_DEBUGGING_TYPE +#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG + +#undef TARGET_ASM_SELECT_SECTION +#define TARGET_ASM_SELECT_SECTION x86_64_elf_select_section + +#undef TARGET_ASM_UNIQUE_SECTION +#define TARGET_ASM_UNIQUE_SECTION x86_64_elf_unique_section + +#undef TARGET_SECTION_TYPE_FLAGS +#define TARGET_SECTION_TYPE_FLAGS x86_64_elf_section_type_flags diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/x86-tune.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/x86-tune.def new file mode 100644 index 0000000..d983e2f --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/i386/x86-tune.def @@ -0,0 +1,662 @@ +/* Definitions of x86 tunable features. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* Tuning for a given CPU XXXX consists of: + - adding new CPU into: + - adding PROCESSOR_XXX to processor_type (in i386.h) + - possibly adding XXX into CPU attribute in i386.md + - adding XXX to processor_alias_table (in i386.cc) + - introducing ix86_XXX_cost in i386.cc + - Stringop generation table can be build based on test_stringop + - script (once rest of tuning is complete) + - designing a scheduler model in + - XXXX.md file + - Updating ix86_issue_rate and ix86_adjust_cost in i386.md + - possibly updating ia32_multipass_dfa_lookahead, ix86_sched_reorder + and ix86_sched_init_global if those tricks are needed. + - Tunning the flags bellow. Those are split into sections and each + section is very roughly ordered by importance. */ + +/*****************************************************************************/ +/* Scheduling flags. */ +/*****************************************************************************/ + +/* X86_TUNE_SCHEDULE: Enable scheduling. */ +DEF_TUNE (X86_TUNE_SCHEDULE, "schedule", + m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT + | m_INTEL | m_KNL | m_KNM | m_K6_GEODE | m_AMD_MULTIPLE | m_GOLDMONT + | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming + on modern chips. 
Prefer stores affecting whole integer register + over partial stores. For example prefer MOVZBL or MOVQ to load 8bit + value over movb. */ +DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency", + m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 + | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL + | m_KNL | m_KNM | m_AMD_MULTIPLE | m_TREMONT | m_ALDERLAKE + | m_GENERIC) + +/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store + destinations to be 128bit to allow register renaming on 128bit SSE units, + but usually results in one extra microop on 64bit SSE units. + Experimental results shows that disabling this option on P4 brings over 20% + SPECfp regression, while enabling it on K8 brings roughly 2.4% regression + that can be partly masked by careful scheduling of moves. */ +DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency", + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10 + | m_BDVER | m_ZNVER | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY: This knob avoids + partial write to the destination in scalar SSE conversion from FP + to FP. */ +DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY, + "sse_partial_reg_fp_converts_dependency", + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10 + | m_BDVER | m_ZNVER | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY: This knob avoids partial + write to the destination in scalar SSE conversion from integer to FP. */ +DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY, + "sse_partial_reg_converts_dependency", + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10 + | m_BDVER | m_ZNVER | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_DEST_FALSE_DEP_FOR_GLC: This knob inserts zero-idiom before + several insns to break false dependency on the dest register for GLC + micro-architecture. 
*/ +DEF_TUNE (X86_TUNE_DEST_FALSE_DEP_FOR_GLC, + "dest_false_dep_for_glc", m_SAPPHIRERAPIDS | m_ALDERLAKE) + +/* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies + are resolved on SSE register parts instead of whole registers, so we may + maintain just lower part of scalar values in proper format leaving the + upper part undefined. */ +DEF_TUNE (X86_TUNE_SSE_SPLIT_REGS, "sse_split_regs", m_ATHLON_K8) + +/* X86_TUNE_PARTIAL_FLAG_REG_STALL: this flag disables use of flags + set by instructions affecting just some flags (in particular shifts). + This is because Core2 resolves dependencies on whole flags register + and such sequences introduce false dependency on previous instruction + setting full flags. + + The flags does not affect generation of INC and DEC that is controlled + by X86_TUNE_USE_INCDEC. */ + +DEF_TUNE (X86_TUNE_PARTIAL_FLAG_REG_STALL, "partial_flag_reg_stall", + m_CORE2) + +/* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid + partial dependencies. */ +DEF_TUNE (X86_TUNE_MOVX, "movx", + m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE + | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL + | m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE + | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by + full sized loads. */ +DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall", + m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL + | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE + | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent + conditional jump instruction for 32 bit TARGET. */ +DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_32, "fuse_cmp_and_branch_32", + m_CORE_ALL | m_BDVER | m_ZNVER | m_GENERIC) + +/* X86_TUNE_FUSE_CMP_AND_BRANCH_64: Fuse compare with a subsequent + conditional jump instruction for TARGET_64BIT. 
*/ +DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_64, "fuse_cmp_and_branch_64", + m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER + | m_ZNVER | m_GENERIC) + +/* X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS: Fuse compare with a + subsequent conditional jump instruction when the condition jump + check sign flag (SF) or overflow flag (OF). */ +DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS, "fuse_cmp_and_branch_soflags", + m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_BDVER + | m_ZNVER | m_GENERIC) + +/* X86_TUNE_FUSE_ALU_AND_BRANCH: Fuse alu with a subsequent conditional + jump instruction when the alu instruction produces the CCFLAG consumed by + the conditional jump instruction. */ +DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH, "fuse_alu_and_branch", + m_SANDYBRIDGE | m_CORE_AVX2 | m_GENERIC) + + +/*****************************************************************************/ +/* Function prologue, epilogue and function calling sequences. */ +/*****************************************************************************/ + +/* X86_TUNE_ACCUMULATE_OUTGOING_ARGS: Allocate stack space for outgoing + arguments in prologue/epilogue instead of separately for each call + by push/pop instructions. + This increase code size by about 5% in 32bit mode, less so in 64bit mode + because parameters are passed in registers. It is considerable + win for targets without stack engine that prevents multple push operations + to happen in parallel. */ + +DEF_TUNE (X86_TUNE_ACCUMULATE_OUTGOING_ARGS, "accumulate_outgoing_args", + m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL + | m_GOLDMONT | m_GOLDMONT_PLUS | m_ATHLON_K8) + +/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in prologues that are + considered on critical path. */ +DEF_TUNE (X86_TUNE_PROLOGUE_USING_MOVE, "prologue_using_move", + m_PPRO | m_ATHLON_K8) + +/* X86_TUNE_PROLOGUE_USING_MOVE: Do not use push/pop in epilogues that are + considered on critical path. 
*/ +DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move", + m_PPRO | m_ATHLON_K8) + +/* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits. */ +DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave", + m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_TREMONT + | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions. + Some chips, like 486 and Pentium works faster with separate load + and push instructions. */ +DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory", + m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE + | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred + over esp subtraction. */ +DEF_TUNE (X86_TUNE_SINGLE_PUSH, "single_push", m_386 | m_486 | m_PENT + | m_LAKEMONT | m_K6_GEODE) + +/* X86_TUNE_DOUBLE_PUSH. Enable if double push insn is preferred + over esp subtraction. */ +DEF_TUNE (X86_TUNE_DOUBLE_PUSH, "double_push", m_PENT | m_LAKEMONT + | m_K6_GEODE) + +/* X86_TUNE_SINGLE_POP: Enable if single pop insn is preferred + over esp addition. */ +DEF_TUNE (X86_TUNE_SINGLE_POP, "single_pop", m_386 | m_486 | m_PENT + | m_LAKEMONT | m_PPRO) + +/* X86_TUNE_DOUBLE_POP: Enable if double pop insn is preferred + over esp addition. */ +DEF_TUNE (X86_TUNE_DOUBLE_POP, "double_pop", m_PENT | m_LAKEMONT) + +/*****************************************************************************/ +/* Branch predictor tuning */ +/*****************************************************************************/ + +/* X86_TUNE_PAD_SHORT_FUNCTION: Make every function to be at least 4 + instructions long. */ +DEF_TUNE (X86_TUNE_PAD_SHORT_FUNCTION, "pad_short_function", m_BONNELL) + +/* X86_TUNE_PAD_RETURNS: Place NOP before every RET that is a destination + of conditional jump or directly preceded by other jump instruction. + This is important for AND K8-AMDFAM10 because the branch prediction + architecture expect at most one jump per 2 byte window. 
Failing to + pad returns leads to misaligned return stack. */ +DEF_TUNE (X86_TUNE_PAD_RETURNS, "pad_returns", + m_ATHLON_K8 | m_AMDFAM10) + +/* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more + than 4 branch instructions in the 16 byte window. */ +DEF_TUNE (X86_TUNE_FOUR_JUMP_LIMIT, "four_jump_limit", + m_PPRO | m_P4_NOCONA | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM + | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL | m_ATHLON_K8 | m_AMDFAM10) + +/*****************************************************************************/ +/* Integer instruction selection tuning */ +/*****************************************************************************/ + +/* X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL: Enable software prefetching + at -O3. For the moment, the prefetching seems badly tuned for Intel + chips. */ +DEF_TUNE (X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL, "software_prefetching_beneficial", + m_K6_GEODE | m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER) + +/* X86_TUNE_LCP_STALL: Avoid an expensive length-changing prefix stall + on 16-bit immediate moves into memory on Core2 and Corei7. */ +DEF_TUNE (X86_TUNE_LCP_STALL, "lcp_stall", m_CORE_ALL | m_GENERIC) + +/* X86_TUNE_READ_MODIFY: Enable use of read-modify instructions such + as "add mem, reg". */ +DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_LAKEMONT | m_PPRO)) + +/* X86_TUNE_USE_INCDEC: Enable use of inc/dec instructions. + + Core2 and nehalem has stall of 7 cycles for partial flag register stalls. + Sandy bridge and Ivy bridge generate extra uop. On Haswell this extra uop + is output only when the values needs to be really merged, which is not + done by GCC generated code. 
*/ +DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec", + ~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE + | m_BONNELL | m_SILVERMONT | m_INTEL | m_KNL | m_KNM | m_GOLDMONT + | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC)) + +/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred + for DFmode copies */ +DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves", + ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT + | m_KNL | m_KNM | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_GOLDMONT + | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC)) + +/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag + will impact LEA instruction selection. */ +DEF_TUNE (X86_TUNE_OPT_AGU, "opt_agu", m_BONNELL | m_SILVERMONT | m_KNL + | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL) + +/* X86_TUNE_AVOID_LEA_FOR_ADDR: Avoid lea for address computation. */ +DEF_TUNE (X86_TUNE_AVOID_LEA_FOR_ADDR, "avoid_lea_for_addr", + m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS + | m_KNL | m_KNM) + +/* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is + vector path on AMD machines. + FIXME: Do we need to enable this for core? */ +DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM32_MEM, "slow_imul_imm32_mem", + m_K8 | m_AMDFAM10) + +/* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD + machines. + FIXME: Do we need to enable this for core? */ +DEF_TUNE (X86_TUNE_SLOW_IMUL_IMM8, "slow_imul_imm8", + m_K8 | m_AMDFAM10) + +/* X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE: Try to avoid memory operands for + a conditional move. */ +DEF_TUNE (X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE, "avoid_mem_opnd_for_cmove", + m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_KNL + | m_KNM | m_INTEL) + +/* X86_TUNE_SINGLE_STRINGOP: Enable use of single string operations, such + as MOVS and STOS (without a REP prefix) to move/set sequences of bytes. 
*/ +DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA) + +/* X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB: Enable use of REP MOVSB/STOSB to + move/set sequences of bytes with known size. */ +DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB, + "prefer_known_rep_movsb_stosb", + m_SKYLAKE | m_ALDERLAKE | m_TREMONT | m_CORE_AVX512) + +/* X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES: Enable generation of + compact prologues and epilogues by issuing a misaligned moves. This + requires target to handle misaligned moves and partial memory stalls + reasonably well. + FIXME: This may actualy be a win on more targets than listed here. */ +DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES, + "misaligned_move_string_pro_epilogues", + m_386 | m_486 | m_CORE_ALL | m_AMD_MULTIPLE | m_TREMONT + | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_USE_SAHF: Controls use of SAHF. */ +DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf", + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT + | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER + | m_BTVER | m_ZNVER | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT + | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_USE_CLTD: Controls use of CLTD and CTQO instructions. */ +DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd", + ~(m_PENT | m_LAKEMONT | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL + | m_K6 | m_GOLDMONT | m_GOLDMONT_PLUS)) + +/* X86_TUNE_USE_BT: Enable use of BT (bit test) instructions. */ +DEF_TUNE (X86_TUNE_USE_BT, "use_bt", + m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL + | m_LAKEMONT | m_AMD_MULTIPLE | m_GOLDMONT | m_GOLDMONT_PLUS + | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency + for bit-manipulation instructions. 
*/ +DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_BMI, "avoid_false_dep_for_bmi", + m_SANDYBRIDGE | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based + on hardware capabilities. Bdver3 hardware has a loop buffer which makes + unrolling small loop less important. For, such architectures we adjust + the unroll factor so that the unrolled loop fits the loop buffer. */ +DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4) + +/* X86_TUNE_ONE_IF_CONV_INSNS: Restrict a number of cmov insns in + if-converted sequence to one. */ +DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn", + m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_CORE_ALL | m_GOLDMONT + | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_AVOID_MFENCE: Use lock prefixed instructions instead of mfence. */ +DEF_TUNE (X86_TUNE_AVOID_MFENCE, "avoid_mfence", + m_CORE_ALL | m_BDVER | m_ZNVER | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_EXPAND_ABS: This enables a new abs pattern by + generating instructions for abs (x) = (((signed) x >> (W-1) ^ x) - + (signed) x >> (W-1)) instead of cmove or SSE max/abs instructions. */ +DEF_TUNE (X86_TUNE_EXPAND_ABS, "expand_abs", + m_CORE_ALL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT + | m_GOLDMONT_PLUS) + +/*****************************************************************************/ +/* 387 instruction selection tuning */ +/*****************************************************************************/ + +/* X86_TUNE_USE_HIMODE_FIOP: Enables use of x87 instructions with 16bit + integer operand. + FIXME: Why this is disabled for modern chips? */ +DEF_TUNE (X86_TUNE_USE_HIMODE_FIOP, "use_himode_fiop", + m_386 | m_486 | m_K6_GEODE) + +/* X86_TUNE_USE_SIMODE_FIOP: Enables use of x87 instructions with 32bit + integer operand. 
*/ +DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop", + ~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL + | m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_AMD_MULTIPLE + | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE + | m_GENERIC)) + +/* X86_TUNE_USE_FFREEP: Use freep instruction instead of fstp. */ +DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE) + +/* X86_TUNE_EXT_80387_CONSTANTS: Use fancy 80387 constants, such as PI. */ +DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants", + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT + | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_GOLDMONT + | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/*****************************************************************************/ +/* SSE instruction selection tuning */ +/*****************************************************************************/ + +/* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE + regs instead of memory. */ +DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill", + m_CORE_ALL) + +/* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead + of a sequence loading registers by parts. */ +DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal", + m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM + | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE + | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_GENERIC) + +/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores + instead of a sequence loading registers by parts. 
*/ +DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal", + m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM + | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS + | m_TREMONT | m_ALDERLAKE | m_BDVER | m_ZNVER | m_GENERIC) + +/* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single + precision 128bit instructions instead of double where possible. */ +DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optimal", + m_BDVER | m_ZNVER) + +/* X86_TUNE_SSE_TYPELESS_STORES: Always movaps/movups for 128bit stores. */ +DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores", + m_AMD_MULTIPLE | m_CORE_ALL | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to + xorps/xorpd and other variants. */ +DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor", + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_ZNVER + | m_TREMONT | m_ALDERLAKE | m_GENERIC) + +/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves in from integer + to SSE registers. If disabled, the moves will be done by storing + the value to memory and reloading. + Enable this flag for generic - the only relevant architecture preferring + no inter-unit moves is Buldozer. While this makes small regression on SPECfp + scores (sub 0.3%), disabling inter-unit moves penalizes noticeably hand + written vectorized code which use i.e. _mm_set_epi16. */ +DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_TO_VEC, "inter_unit_moves_to_vec", + ~(m_ATHLON_K8 | m_AMDFAM10 | m_BDVER | m_BTVER)) + +/* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves in from SSE + to integer registers. If disabled, the moves will be done by storing + the value to memory and reloading. */ +DEF_TUNE (X86_TUNE_INTER_UNIT_MOVES_FROM_VEC, "inter_unit_moves_from_vec", + ~m_ATHLON_K8) + +/* X86_TUNE_INTER_UNIT_CONVERSIONS: Enable float<->integer conversions + to use both SSE and integer registers at a same time. 
*/ +DEF_TUNE (X86_TUNE_INTER_UNIT_CONVERSIONS, "inter_unit_conversions", + ~(m_AMDFAM10 | m_BDVER)) + +/* X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS: Try to split memory operand for + fp converts to destination register. */ +DEF_TUNE (X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS, "split_mem_opnd_for_fp_converts", + m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS + | m_INTEL) + +/* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion + from FP to FP. This form of instructions avoids partial write to the + destination. */ +DEF_TUNE (X86_TUNE_USE_VECTOR_FP_CONVERTS, "use_vector_fp_converts", + m_AMDFAM10) + +/* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion + from integer to FP. */ +DEF_TUNE (X86_TUNE_USE_VECTOR_CONVERTS, "use_vector_converts", m_AMDFAM10) + +/* X86_TUNE_SLOW_SHUFB: Indicates tunings with slow pshufb instruction. */ +DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb", + m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_GOLDMONT + | m_GOLDMONT_PLUS | m_INTEL) + +/* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of prefixes. */ +DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes", + m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE + | m_INTEL) + +/* X86_TUNE_USE_GATHER_2PARTS: Use gather instructions for vectors with 2 + elements. */ +DEF_TUNE (X86_TUNE_USE_GATHER_2PARTS, "use_gather_2parts", + ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ALDERLAKE | m_GENERIC)) + +/* X86_TUNE_USE_GATHER_4PARTS: Use gather instructions for vectors with 4 + elements. */ +DEF_TUNE (X86_TUNE_USE_GATHER_4PARTS, "use_gather_4parts", + ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ALDERLAKE | m_GENERIC)) + +/* X86_TUNE_USE_GATHER: Use gather instructions for vectors with 8 or more + elements. */ +DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather", + ~(m_ZNVER1 | m_ZNVER2 | m_ALDERLAKE | m_GENERIC)) + +/* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or + smaller FMA chain. 
*/ +DEF_TUNE (X86_TUNE_AVOID_128FMA_CHAINS, "avoid_fma_chains", m_ZNVER) + +/* X86_TUNE_AVOID_256FMA_CHAINS: Avoid creating loops with tight 256bit or + smaller FMA chain. */ +DEF_TUNE (X86_TUNE_AVOID_256FMA_CHAINS, "avoid_fma256_chains", m_ZNVER2 | m_ZNVER3) + +/* X86_TUNE_V2DF_REDUCTION_PREFER_PHADDPD: Prefer haddpd + for v2df vector reduction. */ +DEF_TUNE (X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD, + "v2df_reduction_prefer_haddpd", m_NONE) + +/*****************************************************************************/ +/* AVX instruction selection tuning (some of SSE flags affects AVX, too) */ +/*****************************************************************************/ + +/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if false, unaligned loads are + split. */ +DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal", + ~(m_NEHALEM | m_SANDYBRIDGE)) + +/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if false, unaligned stores are + split. */ +DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL, "256_unaligned_store_optimal", + ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_ZNVER1)) + +/* X86_TUNE_AVX256_SPLIT_REGS: if true, AVX256 ops are split into two AVX128 ops. */ +DEF_TUNE (X86_TUNE_AVX256_SPLIT_REGS, "avx256_split_regs",m_BDVER | m_BTVER2 + | m_ZNVER1) + +/* X86_TUNE_AVX128_OPTIMAL: Enable 128-bit AVX instruction generation for + the auto-vectorizer. */ +DEF_TUNE (X86_TUNE_AVX128_OPTIMAL, "avx128_optimal", m_BDVER | m_BTVER2 + | m_ZNVER1) + +/* X86_TUNE_AVX256_OPTIMAL: Use 256-bit AVX instructions instead of 512-bit AVX + instructions in the auto-vectorizer. */ +DEF_TUNE (X86_TUNE_AVX256_OPTIMAL, "avx256_optimal", m_CORE_AVX512) + +/* X86_TUNE_AVX256_MOVE_BY_PIECES: Optimize move_by_pieces with 256-bit + AVX instructions. */ +DEF_TUNE (X86_TUNE_AVX256_MOVE_BY_PIECES, "avx256_move_by_pieces", + m_CORE_AVX512) + +/* X86_TUNE_AVX256_STORE_BY_PIECES: Optimize store_by_pieces with 256-bit + AVX instructions. 
*/ +DEF_TUNE (X86_TUNE_AVX256_STORE_BY_PIECES, "avx256_store_by_pieces", + m_CORE_AVX512) + +/* X86_TUNE_AVX512_MOVE_BY_PIECES: Optimize move_by_pieces with 512-bit + AVX instructions. */ +DEF_TUNE (X86_TUNE_AVX512_MOVE_BY_PIECES, "avx512_move_by_pieces", + m_SAPPHIRERAPIDS) + +/* X86_TUNE_AVX512_STORE_BY_PIECES: Optimize store_by_pieces with 512-bit + AVX instructions. */ +DEF_TUNE (X86_TUNE_AVX512_STORE_BY_PIECES, "avx512_store_by_pieces", + m_SAPPHIRERAPIDS) + +/*****************************************************************************/ +/*****************************************************************************/ +/* Historical relics: tuning flags that helps a specific old CPU designs */ +/*****************************************************************************/ + +/* X86_TUNE_DOUBLE_WITH_ADD: Use add instead of sal to double value in + an integer register. */ +DEF_TUNE (X86_TUNE_DOUBLE_WITH_ADD, "double_with_add", ~m_386) + +/* X86_TUNE_ALWAYS_FANCY_MATH_387: controls use of fancy 387 operations, + such as fsqrt, fprem, fsin, fcos, fsincos etc. + Should be enabled for all targets that always has coprocesor. */ +DEF_TUNE (X86_TUNE_ALWAYS_FANCY_MATH_387, "always_fancy_math_387", + ~(m_386 | m_486 | m_LAKEMONT)) + +/* X86_TUNE_UNROLL_STRLEN: Produce (quite lame) unrolled sequence for + inline strlen. This affects only -minline-all-stringops mode. By + default we always dispatch to a library since our internal strlen + is bad. */ +DEF_TUNE (X86_TUNE_UNROLL_STRLEN, "unroll_strlen", ~m_386) + +/* X86_TUNE_SHIFT1: Enables use of short encoding of "sal reg" instead of + longer "sal $1, reg". */ +DEF_TUNE (X86_TUNE_SHIFT1, "shift1", ~m_486) + +/* X86_TUNE_ZERO_EXTEND_WITH_AND: Use AND instruction instead + of mozbl/movwl. */ +DEF_TUNE (X86_TUNE_ZERO_EXTEND_WITH_AND, "zero_extend_with_and", + m_486 | m_PENT) + +/* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode + and SImode multiply, but 386 and 486 do HImode multiply faster. 
*/ +DEF_TUNE (X86_TUNE_PROMOTE_HIMODE_IMUL, "promote_himode_imul", + ~(m_386 | m_486)) + +/* X86_TUNE_FAST_PREFIX: Enable demoting some 32bit or 64bit arithmetic + into 16bit/8bit when resulting sequence is shorter. For example + for "and $-65536, reg" to 16bit store of 0. */ +DEF_TUNE (X86_TUNE_FAST_PREFIX, "fast_prefix", + ~(m_386 | m_486 | m_PENT | m_LAKEMONT)) + +/* X86_TUNE_READ_MODIFY_WRITE: Enable use of read modify write instructions + such as "add $1, mem". */ +DEF_TUNE (X86_TUNE_READ_MODIFY_WRITE, "read_modify_write", + ~(m_PENT | m_LAKEMONT)) + +/* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR + than a MOV. */ +DEF_TUNE (X86_TUNE_MOVE_M1_VIA_OR, "move_m1_via_or", m_PENT | m_LAKEMONT) + +/* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is, + but one byte longer. */ +DEF_TUNE (X86_TUNE_NOT_UNPAIRABLE, "not_unpairable", m_PENT | m_LAKEMONT) + +/* X86_TUNE_PARTIAL_REG_STALL: Pentium pro, unlike later chips, handled + use of partial registers by renaming. This improved performance of 16bit + code where upper halves of registers are not used. It also leads to + an penalty whenever a 16bit store is followed by 32bit use. This flag + disables production of such sequences in common cases. + See also X86_TUNE_HIMODE_MATH. + + In current implementation the partial register stalls are not eliminated + very well - they can be introduced via subregs synthesized by combine + and can happen in caller/callee saving sequences. */ +DEF_TUNE (X86_TUNE_PARTIAL_REG_STALL, "partial_reg_stall", m_PPRO) + +/* X86_TUNE_PROMOTE_QIMODE: When it is cheap, turn 8bit arithmetic to + corresponding 32bit arithmetic. */ +DEF_TUNE (X86_TUNE_PROMOTE_QIMODE, "promote_qimode", + ~m_PPRO) + +/* X86_TUNE_PROMOTE_HI_REGS: Same, but for 16bit artihmetic. Again we avoid + partial register stalls on PentiumPro targets. */ +DEF_TUNE (X86_TUNE_PROMOTE_HI_REGS, "promote_hi_regs", m_PPRO) + +/* X86_TUNE_HIMODE_MATH: Enable use of 16bit arithmetic. 
+ On PPro this flag is meant to avoid partial register stalls. */ +DEF_TUNE (X86_TUNE_HIMODE_MATH, "himode_math", ~m_PPRO) + +/* X86_TUNE_SPLIT_LONG_MOVES: Avoid instructions moving immediates + directly to memory. */ +DEF_TUNE (X86_TUNE_SPLIT_LONG_MOVES, "split_long_moves", m_PPRO) + +/* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */ +DEF_TUNE (X86_TUNE_USE_XCHGB, "use_xchgb", m_PENT4) + +/* X86_TUNE_USE_MOV0: Use "mov $0, reg" instead of "xor reg, reg" to clear + integer register. */ +DEF_TUNE (X86_TUNE_USE_MOV0, "use_mov0", m_K6) + +/* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory + operand that cannot be represented using a modRM byte. The XOR + replacement is long decoded, so this split helps here as well. */ +DEF_TUNE (X86_TUNE_NOT_VECTORMODE, "not_vectormode", m_K6) + +/* X86_TUNE_AVOID_VECTOR_DECODE: Enable splitters that avoid vector decoded + forms of instructions on K8 targets. */ +DEF_TUNE (X86_TUNE_AVOID_VECTOR_DECODE, "avoid_vector_decode", + m_K8) + +/*****************************************************************************/ +/* This never worked well before. */ +/*****************************************************************************/ + +/* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based + on simulation result. But after P4 was made, no performance benefit + was observed with branch hints. It also increases the code size. + As a result, icc never generates branch hints. */ +DEF_TUNE (X86_TUNE_BRANCH_PREDICTION_HINTS, "branch_prediction_hints", m_NONE) + +/* X86_TUNE_QIMODE_MATH: Enable use of 8bit arithmetic. */ +DEF_TUNE (X86_TUNE_QIMODE_MATH, "qimode_math", m_ALL) + +/* X86_TUNE_PROMOTE_QI_REGS: This enables generic code that promotes all 8bit + arithmetic to 32bit via PROMOTE_MODE macro. This code generation scheme + is usually used for RISC targets. 
*/ +DEF_TUNE (X86_TUNE_PROMOTE_QI_REGS, "promote_qi_regs", m_NONE) + +/* X86_TUNE_EMIT_VZEROUPPER: This enables vzeroupper instruction insertion + before a transfer of control flow out of the function. */ +DEF_TUNE (X86_TUNE_EMIT_VZEROUPPER, "emit_vzeroupper", ~m_KNL) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/initfini-array.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/initfini-array.h new file mode 100644 index 0000000..c9b20d5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/initfini-array.h @@ -0,0 +1,45 @@ +/* Definitions for ELF systems with .init_array/.fini_array section + support. + Copyright (C) 2011-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#if HAVE_INITFINI_ARRAY_SUPPORT + +#define USE_INITFINI_ARRAY + +#undef INIT_SECTION_ASM_OP +#undef FINI_SECTION_ASM_OP + +#undef INIT_ARRAY_SECTION_ASM_OP +#define INIT_ARRAY_SECTION_ASM_OP + +#undef FINI_ARRAY_SECTION_ASM_OP +#define FINI_ARRAY_SECTION_ASM_OP + +/* Use .init_array/.fini_array section for constructors and destructors. 
*/ +#undef TARGET_ASM_CONSTRUCTOR +#define TARGET_ASM_CONSTRUCTOR default_elf_init_array_asm_out_constructor +#undef TARGET_ASM_DESTRUCTOR +#define TARGET_ASM_DESTRUCTOR default_elf_fini_array_asm_out_destructor + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux-android.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux-android.h new file mode 100644 index 0000000..cf34066 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux-android.h @@ -0,0 +1,65 @@ +/* Configuration file for Linux Android targets. + Copyright (C) 2008-2022 Free Software Foundation, Inc. + Contributed by Doug Kwan (dougkwan@google.com) + Rewritten by CodeSourcery, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#define ANDROID_TARGET_OS_CPP_BUILTINS() \ + do { \ + if (TARGET_ANDROID) \ + builtin_define ("__ANDROID__"); \ + } while (0) + +#define ANDROID_TARGET_D_OS_VERSIONS() \ + do { \ + if (TARGET_ANDROID) \ + builtin_version ("Android"); \ + } while (0) + +#if ANDROID_DEFAULT +# define NOANDROID "mno-android" +#else +# define NOANDROID "!mandroid" +#endif + +#define LINUX_OR_ANDROID_CC(LINUX_SPEC, ANDROID_SPEC) \ + "%{" NOANDROID "|tno-android-cc:" LINUX_SPEC ";:" ANDROID_SPEC "}" + +#define LINUX_OR_ANDROID_LD(LINUX_SPEC, ANDROID_SPEC) \ + "%{" NOANDROID "|tno-android-ld:" LINUX_SPEC ";:" ANDROID_SPEC "}" + +#define ANDROID_LINK_SPEC \ + "%{shared: -Bsymbolic}" + +#define ANDROID_CC1_SPEC \ + "%{!mglibc:%{!muclibc:%{!mbionic: -mbionic}}} " \ + "%{!fno-pic:%{!fno-PIC:%{!fpic:%{!fPIC: -fPIC}}}}" + +#define ANDROID_CC1PLUS_SPEC \ + "%{!fexceptions:%{!fno-exceptions: -fno-exceptions}} " \ + "%{!frtti:%{!fno-rtti: -fno-rtti}}" + +#define ANDROID_LIB_SPEC \ + "%{!static: -ldl}" + +#define ANDROID_STARTFILE_SPEC \ + "%{shared: crtbegin_so%O%s;:" \ + " %{static: crtbegin_static%O%s;: crtbegin_dynamic%O%s}}" + +#define ANDROID_ENDFILE_SPEC \ + "%{shared: crtend_so%O%s;: crtend_android%O%s}" diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux-protos.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux-protos.h new file mode 100644 index 0000000..c29c67e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux-protos.h @@ -0,0 +1,22 @@ +/* Prototypes. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. 
+ +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +extern bool linux_has_ifunc_p (void); + +extern bool linux_libc_has_function (enum function_class fn_class, tree); diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux.h new file mode 100644 index 0000000..74f7079 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/linux.h @@ -0,0 +1,228 @@ +/* Definitions for systems using the Linux kernel, with or without + MMU, using ELF at the compiler level but possibly FLT for final + linked executables and shared libraries in some no-MMU cases, and + possibly with a choice of libc implementations. + Copyright (C) 1995-2022 Free Software Foundation, Inc. + Contributed by Eric Youngdale. + Modified for stabs-in-ELF by H.J. Lu (hjl@lucon.org). + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. 
+ +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* C libraries supported on Linux. */ +#ifdef SINGLE_LIBC +#define OPTION_GLIBC_P(opts) (DEFAULT_LIBC == LIBC_GLIBC) +#define OPTION_UCLIBC_P(opts) (DEFAULT_LIBC == LIBC_UCLIBC) +#define OPTION_BIONIC_P(opts) (DEFAULT_LIBC == LIBC_BIONIC) +#undef OPTION_MUSL_P +#define OPTION_MUSL_P(opts) (DEFAULT_LIBC == LIBC_MUSL) +#else +#define OPTION_GLIBC_P(opts) ((opts)->x_linux_libc == LIBC_GLIBC) +#define OPTION_UCLIBC_P(opts) ((opts)->x_linux_libc == LIBC_UCLIBC) +#define OPTION_BIONIC_P(opts) ((opts)->x_linux_libc == LIBC_BIONIC) +#undef OPTION_MUSL_P +#define OPTION_MUSL_P(opts) ((opts)->x_linux_libc == LIBC_MUSL) +#endif +#define OPTION_GLIBC OPTION_GLIBC_P (&global_options) +#define OPTION_UCLIBC OPTION_UCLIBC_P (&global_options) +#define OPTION_BIONIC OPTION_BIONIC_P (&global_options) +#undef OPTION_MUSL +#define OPTION_MUSL OPTION_MUSL_P (&global_options) + +#define GNU_USER_TARGET_OS_CPP_BUILTINS() \ + do { \ + if (OPTION_GLIBC) \ + builtin_define ("__gnu_linux__"); \ + builtin_define_std ("linux"); \ + builtin_define_std ("unix"); \ + builtin_assert ("system=linux"); \ + builtin_assert ("system=unix"); \ + builtin_assert ("system=posix"); \ + } while (0) + +#define GNU_USER_TARGET_D_OS_VERSIONS() \ + do { \ + builtin_version ("linux"); \ + if (OPTION_GLIBC) \ + builtin_version ("CRuntime_Glibc"); \ + else if (OPTION_UCLIBC) \ + builtin_version ("CRuntime_UClibc"); \ + else if (OPTION_BIONIC) \ + builtin_version ("CRuntime_Bionic"); \ + else if (OPTION_MUSL) \ + builtin_version ("CRuntime_Musl"); \ + } while (0) + +/* Determine which dynamic linker to use depending on whether GLIBC or + uClibc or Bionic or musl is the default C library and whether + -muclibc or -mglibc or -mbionic or -mmusl has been passed to change + the default. 
*/ + +#define CHOOSE_DYNAMIC_LINKER1(LIBC1, LIBC2, LIBC3, LIBC4, LD1, LD2, LD3, LD4) \ + "%{" LIBC2 ":" LD2 ";:%{" LIBC3 ":" LD3 ";:%{" LIBC4 ":" LD4 ";:" LD1 "}}}" + +#if DEFAULT_LIBC == LIBC_GLIBC +#define CHOOSE_DYNAMIC_LINKER(G, U, B, M) \ + CHOOSE_DYNAMIC_LINKER1 ("mglibc", "muclibc", "mbionic", "mmusl", G, U, B, M) +#elif DEFAULT_LIBC == LIBC_UCLIBC +#define CHOOSE_DYNAMIC_LINKER(G, U, B, M) \ + CHOOSE_DYNAMIC_LINKER1 ("muclibc", "mglibc", "mbionic", "mmusl", U, G, B, M) +#elif DEFAULT_LIBC == LIBC_BIONIC +#define CHOOSE_DYNAMIC_LINKER(G, U, B, M) \ + CHOOSE_DYNAMIC_LINKER1 ("mbionic", "mglibc", "muclibc", "mmusl", B, G, U, M) +#elif DEFAULT_LIBC == LIBC_MUSL +#define CHOOSE_DYNAMIC_LINKER(G, U, B, M) \ + CHOOSE_DYNAMIC_LINKER1 ("mmusl", "mglibc", "muclibc", "mbionic", M, G, U, B) +#else +#error "Unsupported DEFAULT_LIBC" +#endif /* DEFAULT_LIBC */ + +/* For most targets the following definitions suffice; + GLIBC_DYNAMIC_LINKER must be defined for each target using them, or + GLIBC_DYNAMIC_LINKER32 and GLIBC_DYNAMIC_LINKER64 for targets + supporting both 32-bit and 64-bit compilation. */ +#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKERX32 "/lib/ldx32-uClibc.so.0" +#define BIONIC_DYNAMIC_LINKER "/system/bin/linker" +#define BIONIC_DYNAMIC_LINKER32 "/system/bin/linker" +#define BIONIC_DYNAMIC_LINKER64 "/system/bin/linker64" +#define BIONIC_DYNAMIC_LINKERX32 "/system/bin/linkerx32" +/* Should be redefined for each target that supports musl. 
*/ +#define MUSL_DYNAMIC_LINKER "/dev/null" +#define MUSL_DYNAMIC_LINKER32 "/dev/null" +#define MUSL_DYNAMIC_LINKER64 "/dev/null" +#define MUSL_DYNAMIC_LINKERX32 "/dev/null" + +#define GNU_USER_DYNAMIC_LINKER \ + CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER, \ + BIONIC_DYNAMIC_LINKER, MUSL_DYNAMIC_LINKER) +#define GNU_USER_DYNAMIC_LINKER32 \ + CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER32, UCLIBC_DYNAMIC_LINKER32, \ + BIONIC_DYNAMIC_LINKER32, MUSL_DYNAMIC_LINKER32) +#define GNU_USER_DYNAMIC_LINKER64 \ + CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER64, UCLIBC_DYNAMIC_LINKER64, \ + BIONIC_DYNAMIC_LINKER64, MUSL_DYNAMIC_LINKER64) +#define GNU_USER_DYNAMIC_LINKERX32 \ + CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKERX32, UCLIBC_DYNAMIC_LINKERX32, \ + BIONIC_DYNAMIC_LINKERX32, MUSL_DYNAMIC_LINKERX32) + +/* Whether we have Bionic libc runtime */ +#undef TARGET_HAS_BIONIC +#define TARGET_HAS_BIONIC (OPTION_BIONIC) + +/* musl avoids problematic includes by rearranging the include directories. 
+ * Unfortunately, this is mostly duplicated from cppdefault.cc */ +#if DEFAULT_LIBC == LIBC_MUSL +#define INCLUDE_DEFAULTS_MUSL_GPP \ + { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1, \ + GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 0 }, \ + { GPLUSPLUS_TOOL_INCLUDE_DIR, "G++", 1, 1, \ + GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 1 }, \ + { GPLUSPLUS_BACKWARD_INCLUDE_DIR, "G++", 1, 1, \ + GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 0 }, + +#ifdef LOCAL_INCLUDE_DIR +#define INCLUDE_DEFAULTS_MUSL_LOCAL \ + { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 2 }, \ + { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 0 }, +#else +#define INCLUDE_DEFAULTS_MUSL_LOCAL +#endif + +#ifdef PREFIX_INCLUDE_DIR +#define INCLUDE_DEFAULTS_MUSL_PREFIX \ + { PREFIX_INCLUDE_DIR, 0, 0, 1, 0, 0}, +#else +#define INCLUDE_DEFAULTS_MUSL_PREFIX +#endif + +#ifdef CROSS_INCLUDE_DIR +#define INCLUDE_DEFAULTS_MUSL_CROSS \ + { CROSS_INCLUDE_DIR, "GCC", 0, 0, 0, 0}, +#else +#define INCLUDE_DEFAULTS_MUSL_CROSS +#endif + +#ifdef TOOL_INCLUDE_DIR +#define INCLUDE_DEFAULTS_MUSL_TOOL \ + { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1, 0, 0}, +#else +#define INCLUDE_DEFAULTS_MUSL_TOOL +#endif + +#ifdef NATIVE_SYSTEM_HEADER_DIR +#define INCLUDE_DEFAULTS_MUSL_NATIVE \ + { NATIVE_SYSTEM_HEADER_DIR, 0, 0, 0, 1, 2 }, \ + { NATIVE_SYSTEM_HEADER_DIR, 0, 0, 0, 1, 0 }, +#else +#define INCLUDE_DEFAULTS_MUSL_NATIVE +#endif + +#if defined (CROSS_DIRECTORY_STRUCTURE) && !defined (TARGET_SYSTEM_ROOT) +# undef INCLUDE_DEFAULTS_MUSL_LOCAL +# define INCLUDE_DEFAULTS_MUSL_LOCAL +# undef INCLUDE_DEFAULTS_MUSL_NATIVE +# define INCLUDE_DEFAULTS_MUSL_NATIVE +#else +# undef INCLUDE_DEFAULTS_MUSL_CROSS +# define INCLUDE_DEFAULTS_MUSL_CROSS +#endif + +#undef INCLUDE_DEFAULTS +#define INCLUDE_DEFAULTS \ + { \ + INCLUDE_DEFAULTS_MUSL_GPP \ + INCLUDE_DEFAULTS_MUSL_LOCAL \ + INCLUDE_DEFAULTS_MUSL_PREFIX \ + INCLUDE_DEFAULTS_MUSL_CROSS \ + INCLUDE_DEFAULTS_MUSL_TOOL \ + INCLUDE_DEFAULTS_MUSL_NATIVE \ + { GCC_INCLUDE_DIR, "GCC", 0, 1, 0, 0 }, \ + { 0, 0, 0, 0, 0, 0 } \ + } +#endif + +#if (DEFAULT_LIBC == 
LIBC_UCLIBC) && defined (SINGLE_LIBC) /* uClinux */ +/* This is a *uclinux* target. We don't define below macros to normal linux + versions, because doing so would require *uclinux* targets to include + linux.cc, linux-protos.h, linux.opt, etc. We could, alternatively, add + these files to *uclinux* targets, but that would only pollute option list + (add -mglibc, etc.) without adding any useful support. */ + +/* Define TARGET_LIBC_HAS_FUNCTION for *uclinux* targets to + no_c99_libc_has_function, because uclibc does not, normally, have + c99 runtime. If, in special cases, uclibc does have c99 runtime, + this should be defined to a new hook. Also please note that for targets + like *-linux-uclibc that similar check will also need to be added to + linux_libc_has_function. */ +# undef TARGET_LIBC_HAS_FUNCTION +# define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function + +#else /* !uClinux, i.e., normal Linux */ + +/* Determine what functions are present at the runtime; + this includes full c99 runtime and sincos. */ +# undef TARGET_LIBC_HAS_FUNCTION +# define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/vxworks-dummy.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/vxworks-dummy.h new file mode 100644 index 0000000..6d78d9b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/config/vxworks-dummy.h @@ -0,0 +1,48 @@ +/* Dummy definitions of VxWorks-related macros + Copyright (C) 2007-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* True if we're targeting VxWorks, VxWorks7 and/or 64bit. */ +#ifndef TARGET_VXWORKS +#define TARGET_VXWORKS 0 +#endif + +#ifndef TARGET_VXWORKS7 +#define TARGET_VXWORKS7 0 +#endif + +#ifndef TARGET_VXWORKS64 +#define TARGET_VXWORKS64 0 +#endif + +/* True if generating code for a VxWorks RTP. */ +#ifndef TARGET_VXWORKS_RTP +#define TARGET_VXWORKS_RTP false +#endif + +/* The symbol that points to an RTP's table of GOTs. */ +#define VXWORKS_GOTT_BASE (gcc_unreachable (), "") + +/* The symbol that holds the index of the current module's GOT in + VXWORKS_GOTT_BASE. */ +#define VXWORKS_GOTT_INDEX (gcc_unreachable (), "") diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/configargs.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/configargs.h new file mode 100644 index 0000000..835c7e1 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/configargs.h @@ -0,0 +1,7 @@ +/* Generated automatically. 
*/ +static const char configuration_arguments[] = "/mnt/everest/sources/mussel/sources/gcc/gcc-12.2.0/configure --prefix=/mnt/everest/sources/mussel/toolchain --target=x86_64-linux-musl --with-sysroot=/mnt/everest/sources/mussel/sysroot --enable-languages=c,c++ --disable-multilib --disable-bootstrap --disable-libsanitizer --disable-werror --enable-initfini-array --with-arch=x86-64 --with-tune=generic"; +static const char thread_model[] = "posix"; + +static const struct { + const char *name, *value; +} configure_default_options[] = { { "cpu", "generic" }, { "arch", "x86-64" }, { "tune", "generic" } }; diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/context.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/context.h new file mode 100644 index 0000000..00c01fe --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/context.h @@ -0,0 +1,69 @@ +/* context.h - Holder for global state + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CONTEXT_H +#define GCC_CONTEXT_H + +namespace gcc { + +class pass_manager; +class dump_manager; + +/* GCC's internal state can be divided into zero or more + "parallel universe" of state; an instance of this class is one such + context of state. */ +class context +{ +public: + context (); + ~context (); + + /* The flag shows if there are symbols to be streamed for offloading. 
*/ + bool have_offload; + + /* Pass-management. */ + + void set_passes (pass_manager *m) + { + gcc_assert (!m_passes); + m_passes = m; + } + + pass_manager *get_passes () { gcc_assert (m_passes); return m_passes; } + + /* Handling dump files. */ + + dump_manager *get_dumps () {gcc_assert (m_dumps); return m_dumps; } + +private: + /* Pass-management. */ + pass_manager *m_passes; + + /* Dump files. */ + dump_manager *m_dumps; + +}; // class context + +} // namespace gcc + +/* The global singleton context aka "g". + (the name is chosen to be easy to type in a debugger). */ +extern gcc::context *g; + +#endif /* ! GCC_CONTEXT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/convert.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/convert.h new file mode 100644 index 0000000..323b4af --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/convert.h @@ -0,0 +1,45 @@ +/* Definition of functions in convert.cc. + Copyright (C) 1993-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_CONVERT_H +#define GCC_CONVERT_H + +extern tree convert_to_integer (tree, tree); +extern tree convert_to_integer_maybe_fold (tree, tree, bool); +extern tree convert_to_pointer (tree, tree); +extern tree convert_to_pointer_maybe_fold (tree, tree, bool); +extern tree convert_to_real (tree, tree); +extern tree convert_to_real_maybe_fold (tree, tree, bool); +extern tree convert_to_fixed (tree, tree); +extern tree convert_to_complex (tree, tree); +extern tree convert_to_complex_maybe_fold (tree, tree, bool); +extern tree convert_to_vector (tree, tree); + +extern inline tree convert_to_integer_nofold (tree t, tree x) +{ return convert_to_integer_maybe_fold (t, x, false); } +extern inline tree convert_to_pointer_nofold (tree t, tree x) +{ return convert_to_pointer_maybe_fold (t, x, false); } +extern inline tree convert_to_real_nofold (tree t, tree x) +{ return convert_to_real_maybe_fold (t, x, false); } +extern inline tree convert_to_complex_nofold (tree t, tree x) +{ return convert_to_complex_maybe_fold (t, x, false); } + +extern tree preserve_any_location_wrapper (tree result, tree orig_expr); + +#endif /* GCC_CONVERT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coretypes.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coretypes.h new file mode 100644 index 0000000..08b9ac9 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coretypes.h @@ -0,0 +1,495 @@ +/* GCC core type declarations. + Copyright (C) 2002-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* Provide forward declarations of core types which are referred to by + most of the compiler. This allows header files to use these types + (e.g. in function prototypes) without concern for whether the full + definitions are visible. Some other declarations that need to be + universally visible are here, too. + + In the context of tconfig.h, most of these have special definitions + which prevent them from being used except in further type + declarations. This is a kludge; the right thing is to avoid + including the "tm.h" header set in the context of tconfig.h, but + we're not there yet. 
*/ + +#ifndef GCC_CORETYPES_H +#define GCC_CORETYPES_H + +#ifndef GTY +#define GTY(x) /* nothing - marker for gengtype */ +#endif + +#ifndef USED_FOR_TARGET + +typedef int64_t gcov_type; +typedef uint64_t gcov_type_unsigned; + +struct bitmap_obstack; +class bitmap_head; +typedef class bitmap_head *bitmap; +typedef const class bitmap_head *const_bitmap; +struct simple_bitmap_def; +typedef struct simple_bitmap_def *sbitmap; +typedef const struct simple_bitmap_def *const_sbitmap; +struct rtx_def; +typedef struct rtx_def *rtx; +typedef const struct rtx_def *const_rtx; +class scalar_mode; +class scalar_int_mode; +class scalar_float_mode; +class complex_mode; +class fixed_size_mode; +template class opt_mode; +typedef opt_mode opt_scalar_mode; +typedef opt_mode opt_scalar_int_mode; +typedef opt_mode opt_scalar_float_mode; +template struct pod_mode; +typedef pod_mode scalar_mode_pod; +typedef pod_mode scalar_int_mode_pod; +typedef pod_mode fixed_size_mode_pod; + +/* Subclasses of rtx_def, using indentation to show the class + hierarchy, along with the relevant invariant. + Where possible, keep this list in the same order as in rtl.def. 
*/ +struct rtx_def; + struct rtx_expr_list; /* GET_CODE (X) == EXPR_LIST */ + struct rtx_insn_list; /* GET_CODE (X) == INSN_LIST */ + struct rtx_sequence; /* GET_CODE (X) == SEQUENCE */ + struct rtx_insn; + struct rtx_debug_insn; /* DEBUG_INSN_P (X) */ + struct rtx_nonjump_insn; /* NONJUMP_INSN_P (X) */ + struct rtx_jump_insn; /* JUMP_P (X) */ + struct rtx_call_insn; /* CALL_P (X) */ + struct rtx_jump_table_data; /* JUMP_TABLE_DATA_P (X) */ + struct rtx_barrier; /* BARRIER_P (X) */ + struct rtx_code_label; /* LABEL_P (X) */ + struct rtx_note; /* NOTE_P (X) */ + +struct rtvec_def; +typedef struct rtvec_def *rtvec; +typedef const struct rtvec_def *const_rtvec; +struct hwivec_def; +typedef struct hwivec_def *hwivec; +typedef const struct hwivec_def *const_hwivec; +union tree_node; +typedef union tree_node *tree; +typedef const union tree_node *const_tree; +struct gimple; +typedef gimple *gimple_seq; +struct gimple_stmt_iterator; + +/* Forward decls for leaf gimple subclasses (for individual gimple codes). + Keep this in the same order as the corresponding codes in gimple.def. */ + +struct gcond; +struct gdebug; +struct ggoto; +struct glabel; +struct gswitch; +struct gassign; +struct gasm; +struct gcall; +struct gtransaction; +struct greturn; +struct gbind; +struct gcatch; +struct geh_filter; +struct geh_mnt; +struct geh_else; +struct gresx; +struct geh_dispatch; +struct gphi; +struct gtry; +struct gomp_atomic_load; +struct gomp_atomic_store; +struct gomp_continue; +struct gomp_critical; +struct gomp_ordered; +struct gomp_for; +struct gomp_parallel; +struct gomp_task; +struct gomp_sections; +struct gomp_single; +struct gomp_target; +struct gomp_teams; + +/* Subclasses of symtab_node, using indentation to show the class + hierarchy. 
*/ + +struct symtab_node; + struct cgraph_node; + struct varpool_node; +struct cgraph_edge; + +union section; +typedef union section section; +struct gcc_options; +struct cl_target_option; +struct cl_optimization; +struct cl_option; +struct cl_decoded_option; +struct cl_option_handlers; +struct diagnostic_context; +class pretty_printer; +class diagnostic_event_id_t; +typedef const char * (*diagnostic_input_charset_callback)(const char *); + +template struct array_traits; + +/* Provides a read-only bitmap view of a single integer bitmask or an + array of integer bitmasks, or of a wrapper around such bitmasks. */ +template, + bool has_constant_size = Traits::has_constant_size> +class bitmap_view; + +/* Address space number for named address space support. */ +typedef unsigned char addr_space_t; + +/* The value of addr_space_t that represents the generic address space. */ +#define ADDR_SPACE_GENERIC 0 +#define ADDR_SPACE_GENERIC_P(AS) ((AS) == ADDR_SPACE_GENERIC) + +/* The major intermediate representations of GCC. */ +enum ir_type { + IR_GIMPLE, + IR_RTL_CFGRTL, + IR_RTL_CFGLAYOUT +}; + +/* Provide forward struct declaration so that we don't have to include + all of cpplib.h whenever a random prototype includes a pointer. + Note that the cpp_reader and cpp_token typedefs remain part of + cpplib.h. */ + +struct cpp_reader; +struct cpp_token; + +/* The thread-local storage model associated with a given VAR_DECL + or SYMBOL_REF. This isn't used much, but both trees and RTL refer + to it, so it's here. */ +enum tls_model { + TLS_MODEL_NONE, + TLS_MODEL_EMULATED, + TLS_MODEL_REAL, + TLS_MODEL_GLOBAL_DYNAMIC = TLS_MODEL_REAL, + TLS_MODEL_LOCAL_DYNAMIC, + TLS_MODEL_INITIAL_EXEC, + TLS_MODEL_LOCAL_EXEC +}; + +/* Types of ABI for an offload compiler. */ +enum offload_abi { + OFFLOAD_ABI_UNSET, + OFFLOAD_ABI_LP64, + OFFLOAD_ABI_ILP32 +}; + +/* Types of profile update methods. 
*/ +enum profile_update { + PROFILE_UPDATE_SINGLE, + PROFILE_UPDATE_ATOMIC, + PROFILE_UPDATE_PREFER_ATOMIC +}; + +/* Type of profile reproducibility methods. */ +enum profile_reproducibility { + PROFILE_REPRODUCIBILITY_SERIAL, + PROFILE_REPRODUCIBILITY_PARALLEL_RUNS, + PROFILE_REPRODUCIBILITY_MULTITHREADED +}; + +/* Type of -fstack-protector-*. */ +enum stack_protector { + SPCT_FLAG_DEFAULT = 1, + SPCT_FLAG_ALL = 2, + SPCT_FLAG_STRONG = 3, + SPCT_FLAG_EXPLICIT = 4 +}; + +/* Types of unwind/exception handling info that can be generated. + Note that a UI_TARGET (or larger) setting is considered to be + incompatible with -freorder-blocks-and-partition. */ + +enum unwind_info_type +{ + UI_NONE, + UI_SJLJ, + UI_DWARF2, + UI_SEH, + UI_TARGET +}; + +/* Callgraph node profile representation. */ +enum node_frequency { + /* This function most likely won't be executed at all. + (set only when profile feedback is available or via function attribute). */ + NODE_FREQUENCY_UNLIKELY_EXECUTED, + /* For functions that are known to be executed once (i.e. constructors, destructors + and main function. */ + NODE_FREQUENCY_EXECUTED_ONCE, + /* The default value. */ + NODE_FREQUENCY_NORMAL, + /* Optimize this function hard + (set only when profile feedback is available or via function attribute). */ + NODE_FREQUENCY_HOT +}; + +/* Ways of optimizing code. */ +enum optimization_type { + /* Prioritize speed over size. */ + OPTIMIZE_FOR_SPEED, + + /* Only do things that are good for both size and speed. */ + OPTIMIZE_FOR_BOTH, + + /* Prioritize size over speed. */ + OPTIMIZE_FOR_SIZE +}; + +/* Enumerates a padding direction. */ +enum pad_direction { + /* No padding is required. */ + PAD_NONE, + + /* Insert padding above the data, i.e. at higher memeory addresses + when dealing with memory, and at the most significant end when + dealing with registers. */ + PAD_UPWARD, + + /* Insert padding below the data, i.e. 
at lower memeory addresses + when dealing with memory, and at the least significant end when + dealing with registers. */ + PAD_DOWNWARD +}; + +/* Possible initialization status of a variable. When requested + by the user, this information is tracked and recorded in the DWARF + debug information, along with the variable's location. */ +enum var_init_status +{ + VAR_INIT_STATUS_UNKNOWN, + VAR_INIT_STATUS_UNINITIALIZED, + VAR_INIT_STATUS_INITIALIZED +}; + +/* Names for the different levels of -Wstrict-overflow=N. The numeric + values here correspond to N. */ +enum warn_strict_overflow_code +{ + /* Overflow warning that should be issued with -Wall: a questionable + construct that is easy to avoid even when using macros. Example: + folding (x + CONSTANT > x) to 1. */ + WARN_STRICT_OVERFLOW_ALL = 1, + /* Overflow warning about folding a comparison to a constant because + of undefined signed overflow, other than cases covered by + WARN_STRICT_OVERFLOW_ALL. Example: folding (abs (x) >= 0) to 1 + (this is false when x == INT_MIN). */ + WARN_STRICT_OVERFLOW_CONDITIONAL = 2, + /* Overflow warning about changes to comparisons other than folding + them to a constant. Example: folding (x + 1 > 1) to (x > 0). */ + WARN_STRICT_OVERFLOW_COMPARISON = 3, + /* Overflow warnings not covered by the above cases. Example: + folding ((x * 10) / 5) to (x * 2). */ + WARN_STRICT_OVERFLOW_MISC = 4, + /* Overflow warnings about reducing magnitude of constants in + comparison. Example: folding (x + 2 > y) to (x + 1 >= y). */ + WARN_STRICT_OVERFLOW_MAGNITUDE = 5 +}; + +/* The type of an alias set. Code currently assumes that variables of + this type can take the values 0 (the alias set which aliases + everything) and -1 (sometimes indicating that the alias set is + unknown, sometimes indicating a memory barrier) and -2 (indicating + that the alias set should be set to a unique value but has not been + set yet). 
*/ +typedef int alias_set_type; + +class edge_def; +typedef class edge_def *edge; +typedef const class edge_def *const_edge; +struct basic_block_def; +typedef struct basic_block_def *basic_block; +typedef const struct basic_block_def *const_basic_block; + +#if !defined (GENERATOR_FILE) +# define OBSTACK_CHUNK_SIZE memory_block_pool::block_size +# define obstack_chunk_alloc mempool_obstack_chunk_alloc +# define obstack_chunk_free mempool_obstack_chunk_free +#else +# define OBSTACK_CHUNK_SIZE 0 +# define obstack_chunk_alloc xmalloc +# define obstack_chunk_free free +#endif + +#define gcc_obstack_init(OBSTACK) \ + obstack_specify_allocation ((OBSTACK), OBSTACK_CHUNK_SIZE, 0, \ + obstack_chunk_alloc, \ + obstack_chunk_free) + +/* enum reg_class is target specific, so it should not appear in + target-independent code or interfaces, like the target hook declarations + in target.h. */ +typedef int reg_class_t; + +class rtl_opt_pass; + +namespace gcc { + class context; +} + +typedef std::pair tree_pair; +typedef std::pair string_int_pair; + +/* Define a name->value mapping. */ +template +struct kv_pair +{ + const char *const name; /* the name of the value */ + const ValueType value; /* the value of the name */ +}; + +#else + +struct _dont_use_rtx_here_; +struct _dont_use_rtvec_here_; +struct _dont_use_rtx_insn_here_; +union _dont_use_tree_here_; +#define rtx struct _dont_use_rtx_here_ * +#define const_rtx struct _dont_use_rtx_here_ * +#define rtvec struct _dont_use_rtvec_here * +#define const_rtvec struct _dont_use_rtvec_here * +#define rtx_insn struct _dont_use_rtx_insn_here_ +#define tree union _dont_use_tree_here_ * +#define const_tree union _dont_use_tree_here_ * + +typedef struct scalar_mode scalar_mode; +typedef struct scalar_int_mode scalar_int_mode; +typedef struct scalar_float_mode scalar_float_mode; +typedef struct complex_mode complex_mode; + +#endif + +/* Classes of functions that compiler needs to check + whether they are present at the runtime or not. 
*/ +enum function_class { + function_c94, + function_c99_misc, + function_c99_math_complex, + function_sincos, + function_c11_misc, + function_c2x_misc +}; + +/* Enumerate visibility settings. This is deliberately ordered from most + to least visibility. */ +enum symbol_visibility +{ + VISIBILITY_DEFAULT, + VISIBILITY_PROTECTED, + VISIBILITY_HIDDEN, + VISIBILITY_INTERNAL +}; + +/* enums used by the targetm.excess_precision hook. */ + +enum flt_eval_method +{ + FLT_EVAL_METHOD_UNPREDICTABLE = -1, + FLT_EVAL_METHOD_PROMOTE_TO_FLOAT = 0, + FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE = 1, + FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE = 2, + FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16 = 16 +}; + +enum excess_precision_type +{ + EXCESS_PRECISION_TYPE_IMPLICIT, + EXCESS_PRECISION_TYPE_STANDARD, + EXCESS_PRECISION_TYPE_FAST, + EXCESS_PRECISION_TYPE_FLOAT16 +}; + +/* Level of size optimization. */ + +enum optimize_size_level +{ + /* Do not optimize for size. */ + OPTIMIZE_SIZE_NO, + /* Optimize for size but not at extreme performance costs. */ + OPTIMIZE_SIZE_BALANCED, + /* Optimize for size as much as possible. */ + OPTIMIZE_SIZE_MAX +}; + +/* Support for user-provided GGC and PCH markers. The first parameter + is a pointer to a pointer, the second either NULL if the pointer to + pointer points into a GC object or the actual pointer address if + the first argument points to a temporary and the third a cookie. */ +typedef void (*gt_pointer_operator) (void *, void *, void *); + +#if !defined (HAVE_UCHAR) +typedef unsigned char uchar; +#endif + +/* Most source files will require the following headers. */ +#if !defined (USED_FOR_TARGET) +#include "insn-modes.h" +#include "signop.h" +#include "wide-int.h" +#include "wide-int-print.h" + +/* On targets that don't need polynomial offsets, target-specific code + should be able to treat poly_int like a normal constant, with a + conversion operator going from the former to the latter. 
We also + allow this for gencondmd.cc for all targets, so that we can treat + machine_modes as enums without causing build failures. */ +#if (defined (IN_TARGET_CODE) \ + && (defined (USE_ENUM_MODES) || NUM_POLY_INT_COEFFS == 1)) +#define POLY_INT_CONVERSION 1 +#else +#define POLY_INT_CONVERSION 0 +#endif + +#include "poly-int.h" +#include "poly-int-types.h" +#include "insn-modes-inline.h" +#include "machmode.h" +#include "double-int.h" +#include "align.h" +/* Most host source files will require the following headers. */ +#if !defined (GENERATOR_FILE) +#include "iterator-utils.h" +#include "real.h" +#include "fixed-value.h" +#include "hash-table.h" +#include "hash-set.h" +#include "input.h" +#include "is-a.h" +#include "memory-block.h" +#include "dumpfile.h" +#endif +#endif /* GENERATOR_FILE && !USED_FOR_TARGET */ + +#endif /* coretypes.h */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coroutine-builtins.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coroutine-builtins.def new file mode 100644 index 0000000..feecdb5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coroutine-builtins.def @@ -0,0 +1,53 @@ +/* This file contains the definitions and documentation for the + coroutines builtins used in GCC. + + Copyright (C) 2018-2022 Free Software Foundation, Inc. + + Contributed by Iain Sandoe under contract to Facebook. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +/* Before including this file, you should define a macro: + + DEF_BUILTIN_STUB(ENUM, NAME) + DEF_COROUTINE_BUILTIN (ENUM, NAME, TYPE, ATTRS) + + See builtins.def for details. + The builtins are created used by library implementations of C++ + coroutines. */ + +/* This has to come before all the coroutine builtins. */ +DEF_BUILTIN_STUB (BEGIN_COROUTINE_BUILTINS, (const char *) 0) + +/* These are the builtins that are externally-visible and used by the + standard library implementation of the coroutine header. */ + +DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_PROMISE, "promise", + BT_FN_PTR_PTR_CONST_SIZE_BOOL, + ATTR_CONST_NOTHROW_LEAF_LIST) + +DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_RESUME, "resume", BT_FN_VOID_PTR, + ATTR_NULL) + +DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_DESTROY, "destroy", BT_FN_VOID_PTR, + ATTR_NULL) + +DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_DONE, "done", BT_FN_BOOL_PTR, + ATTR_NOTHROW_LEAF_LIST) + +/* This has to come after all the coroutine builtins. */ +DEF_BUILTIN_STUB (END_COROUTINE_BUILTINS, (const char *) 0) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coverage.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coverage.h new file mode 100644 index 0000000..0ac046c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/coverage.h @@ -0,0 +1,61 @@ +/* coverage.h - Defines data exported from coverage.cc + Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_COVERAGE_H +#define GCC_COVERAGE_H + +#include "gcov-io.h" + +extern void coverage_init (const char *); +extern void coverage_finish (void); +extern void coverage_remove_note_file (void); + +/* Start outputting coverage information for the current + function. */ +extern int coverage_begin_function (unsigned, unsigned); + +/* Complete the coverage information for the current function. */ +extern void coverage_end_function (unsigned, unsigned); + +/* Compute the control flow checksum for the function FN given as argument. */ +extern unsigned coverage_compute_cfg_checksum (struct function *fn); + +/* Compute the profile id of function N. */ +extern unsigned coverage_compute_profile_id (struct cgraph_node *n); + +/* Compute the line number checksum for the current function. */ +extern unsigned coverage_compute_lineno_checksum (void); + +/* Allocate some counters. Repeatable per function. */ +extern int coverage_counter_alloc (unsigned /*counter*/, unsigned/*num*/); +/* Use a counter from the most recent allocation. */ +extern tree tree_coverage_counter_ref (unsigned /*counter*/, unsigned/*num*/); +/* Use a counter address from the most recent allocation. */ +extern tree tree_coverage_counter_addr (unsigned /*counter*/, unsigned/*num*/); + +/* Get all the counters for the current function. 
*/ +extern gcov_type *get_coverage_counts (unsigned /*counter*/, + unsigned /*cfg_checksum*/, + unsigned /*lineno_checksum*/, + unsigned /*n_counts*/); + +extern tree get_gcov_type (void); +extern bool coverage_node_map_initialized_p (void); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cp-tree.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cp-tree.def new file mode 100644 index 0000000..6371f67 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cp-tree.def @@ -0,0 +1,597 @@ +/* This file contains the definitions and documentation for the + additional tree codes used in the GNU C++ compiler (see tree.def + for the standard codes). + Copyright (C) 1987-2022 Free Software Foundation, Inc. + Hacked by Michael Tiemann (tiemann@cygnus.com) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +/* An OFFSET_REF is used in two situations: + + 1. An expression of the form `A::m' where `A' is a class and `m' is + a non-static member. In this case, operand 0 will be a TYPE + (corresponding to `A') and operand 1 will be a FIELD_DECL, + BASELINK, or TEMPLATE_ID_EXPR (corresponding to `m'). + + The expression is a pointer-to-member if its address is taken, + but simply denotes a member of the object if its address is not + taken. + + This form is only used during the parsing phase; once semantic + analysis has taken place they are eliminated. + + 2. 
An expression of the form `x.*p'. In this case, operand 0 will + be an expression corresponding to `x' and operand 1 will be an + expression with pointer-to-member type. */ +DEFTREECODE (OFFSET_REF, "offset_ref", tcc_reference, 2) + +/* A pointer-to-member constant. For a pointer-to-member constant + `X::Y' The PTRMEM_CST_CLASS is the RECORD_TYPE for `X' and the + PTRMEM_CST_MEMBER is the _DECL for `Y'. */ +DEFTREECODE (PTRMEM_CST, "ptrmem_cst", tcc_constant, 0) + +/* For NEW_EXPR, operand 0 is the placement list. + Operand 1 is the new-declarator. + Operand 2 is the number of elements in the array. + Operand 3 is the initializer. */ +DEFTREECODE (NEW_EXPR, "nw_expr", tcc_expression, 4) +DEFTREECODE (VEC_NEW_EXPR, "vec_nw_expr", tcc_expression, 3) + +/* For DELETE_EXPR, operand 0 is the store to be destroyed. + Operand 1 is the value to pass to the destroying function + saying whether the store should be deallocated as well. */ +DEFTREECODE (DELETE_EXPR, "dl_expr", tcc_expression, 2) +DEFTREECODE (VEC_DELETE_EXPR, "vec_dl_expr", tcc_expression, 2) + +/* Value is reference to particular overloaded class method. + Operand 0 is the class, operand 1 is the field + The COMPLEXITY field holds the class level (usually 0). */ +DEFTREECODE (SCOPE_REF, "scope_ref", tcc_reference, 2) + +/* When composing an object with a member, this is the result. + Operand 0 is the object. Operand 1 is the member (usually + a dereferenced pointer to member). */ +DEFTREECODE (MEMBER_REF, "member_ref", tcc_reference, 2) + +/* Type conversion operator in C++. TREE_TYPE is type that this + operator converts to. Operand is expression to be converted. */ +DEFTREECODE (TYPE_EXPR, "type_expr", tcc_expression, 1) + +/* AGGR_INIT_EXPRs have a variably-sized representation similar to + that of CALL_EXPRs. 
Operand 0 is an INTEGER_CST node containing the + operand count, operand 1 is the function which performs initialization, + operand 2 is the slot which was allocated for this expression, and + the remaining operands are the arguments to the initialization function. */ +DEFTREECODE (AGGR_INIT_EXPR, "aggr_init_expr", tcc_vl_exp, 3) + +/* Initialization of an array from another array, expressed at a high level + so that it works with TARGET_EXPR. Operand 0 is the target, operand 1 + is the initializer. */ +DEFTREECODE (VEC_INIT_EXPR, "vec_init_expr", tcc_expression, 2) + +/* A throw expression. operand 0 is the expression, if there was one, + else it is NULL_TREE. */ +DEFTREECODE (THROW_EXPR, "throw_expr", tcc_expression, 1) + +/* An empty class object. The TREE_TYPE gives the class type. We use + these to avoid actually creating instances of the empty classes. */ +DEFTREECODE (EMPTY_CLASS_EXPR, "empty_class_expr", tcc_expression, 0) + +/* A reference to a member function or member functions from a base + class. BASELINK_FUNCTIONS gives the FUNCTION_DECL, + TEMPLATE_DECL, OVERLOAD, or TEMPLATE_ID_EXPR corresponding to the + functions. BASELINK_BINFO gives the base from which the functions + come, i.e., the base to which the `this' pointer must be converted + before the functions are called. BASELINK_ACCESS_BINFO gives the + base used to name the functions. + + A BASELINK is an expression; the TREE_TYPE of the BASELINK gives + the type of the expression. This type is either a FUNCTION_TYPE, + METHOD_TYPE, or `unknown_type_node' indicating that the function is + overloaded. */ +DEFTREECODE (BASELINK, "baselink", tcc_exceptional, 0) + +/* Template definition. The following fields have the specified uses, + although there are other macros in cp-tree.h that should be used for + accessing this data. 
+ DECL_ARGUMENTS template parm vector + DECL_TEMPLATE_INFO template text &c + DECL_VINDEX list of instantiations already produced; + only done for functions so far + For class template: + DECL_INITIAL associated templates (methods &c) + DECL_TEMPLATE_RESULT null + For non-class templates: + TREE_TYPE type of object to be constructed + DECL_TEMPLATE_RESULT decl for object to be created + (e.g., FUNCTION_DECL with tmpl parms used) + */ +DEFTREECODE (TEMPLATE_DECL, "template_decl", tcc_declaration, 0) + +/* Index into a template parameter list. The TEMPLATE_PARM_IDX gives + the index (from 0) of the parameter, while the TEMPLATE_PARM_LEVEL + gives the level (from 1) of the parameter. + + Here's an example: + + template // Index 0, Level 1. + struct S + { + template // Index 1, Level 2. + void f(); + }; + + The DESCENDANTS will be a chain of TEMPLATE_PARM_INDEXs descended + from this one. The first descendant will have the same IDX, but + its LEVEL will be one less. The TREE_CHAIN field is used to chain + together the descendants. The TEMPLATE_PARM_DECL is the + declaration of this parameter, either a TYPE_DECL or CONST_DECL. + The TEMPLATE_PARM_ORIG_LEVEL is the LEVEL of the most distant + parent, i.e., the LEVEL that the parameter originally had when it + was declared. For example, if we instantiate S, we will have: + + struct S + { + template // Index 1, Level 1, Orig Level 2 + void f(); + }; + + The LEVEL is the level of the parameter when we are worrying about + the types of things; the ORIG_LEVEL is the level when we are + worrying about instantiating things. */ +DEFTREECODE (TEMPLATE_PARM_INDEX, "template_parm_index", tcc_exceptional, 0) + +/* Index into a template parameter list for template template parameters. + This parameter must be a type. The TYPE_FIELDS value will be a + TEMPLATE_PARM_INDEX. + + It is used without template arguments like TT in C, + TYPE_NAME is a TEMPLATE_DECL. 
*/ +DEFTREECODE (TEMPLATE_TEMPLATE_PARM, "template_template_parm", tcc_type, 0) + +/* The ordering of the following codes is optimized for the checking + macros in tree.h. Changing the order will degrade the speed of the + compiler. TEMPLATE_TYPE_PARM, TYPENAME_TYPE, TYPEOF_TYPE, + BOUND_TEMPLATE_TEMPLATE_PARM. */ + +/* Index into a template parameter list. This parameter must be a type. + The type.values field will be a TEMPLATE_PARM_INDEX. */ +DEFTREECODE (TEMPLATE_TYPE_PARM, "template_type_parm", tcc_type, 0) + +/* A type designated by `typename T::t'. TYPE_CONTEXT is `T', + TYPE_NAME is an IDENTIFIER_NODE for `t'. If the type was named via + template-id, TYPENAME_TYPE_FULLNAME will hold the TEMPLATE_ID_EXPR. + TREE_TYPE is always NULL. */ +DEFTREECODE (TYPENAME_TYPE, "typename_type", tcc_type, 0) + +/* A type designated by `__typeof (expr)'. TYPEOF_TYPE_EXPR is the + expression in question. */ +DEFTREECODE (TYPEOF_TYPE, "typeof_type", tcc_type, 0) + +/* Like TEMPLATE_TEMPLATE_PARM it is used with bound template arguments + like TT. + In this case, TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO contains the + template name and its bound arguments. TYPE_NAME is a TYPE_DECL. */ +DEFTREECODE (BOUND_TEMPLATE_TEMPLATE_PARM, "bound_template_template_parm", + tcc_type, 0) + +/* For template template argument of the form `T::template C'. + TYPE_CONTEXT is `T', the template parameter dependent object. + TYPE_NAME is a TEMPLATE_DECL, whose DECL_TEMPLATE_PARMS are any + template parms of the instantiation. That decl's DECL_NAME is the + IDENTIFIER_NODE for `C', the member class template. */ +DEFTREECODE (UNBOUND_CLASS_TEMPLATE, "unbound_class_template", tcc_type, 0) + +/* A using declaration. USING_DECL_SCOPE contains the specified + scope. In a variadic using-declaration, this is a TYPE_PACK_EXPANSION. + In a member using decl, unless DECL_DEPENDENT_P is true, + USING_DECL_DECLS contains the _DECL or OVERLOAD so named. 
This is + not an alias, but is later expanded into multiple aliases. */ +DEFTREECODE (USING_DECL, "using_decl", tcc_declaration, 0) + +/* A using directive. The operand is USING_STMT_NAMESPACE. */ +DEFTREECODE (USING_STMT, "using_stmt", tcc_statement, 1) + +/* An un-parsed operand. Holds a vector of input tokens and + a vector of places where the argument was instantiated before + parsing had occurred. This is used for default arguments, delayed + NSDMIs, and noexcept-specifier parsing. */ +DEFTREECODE (DEFERRED_PARSE, "deferred_parse", tcc_exceptional, 0) + +/* An uninstantiated/unevaluated noexcept-specification. For the + uninstantiated case, DEFERRED_NOEXCEPT_PATTERN is the pattern from the + template, and DEFERRED_NOEXCEPT_ARGS are the template arguments to + substitute into the pattern when needed. For the unevaluated case, + those slots are NULL_TREE and we use get_defaulted_eh_spec to find + the exception-specification. */ +DEFTREECODE (DEFERRED_NOEXCEPT, "deferred_noexcept", tcc_exceptional, 0) + +/* A template-id, like foo. The first operand is the template. + The second is NULL if there are no explicit arguments, or a + TREE_VEC of arguments. The template will be a FUNCTION_DECL, + TEMPLATE_DECL, or an OVERLOAD. If the template-id refers to a + member template, the template may be an IDENTIFIER_NODE. */ +DEFTREECODE (TEMPLATE_ID_EXPR, "template_id_expr", tcc_expression, 2) + +/* One of a set of overloaded functions. */ +DEFTREECODE (OVERLOAD, "overload", tcc_exceptional, 0) + +/* A vector of binding slots. */ +DEFTREECODE (BINDING_VECTOR, "binding_vector", tcc_exceptional, 0) + +/* A pseudo-destructor, of the form "OBJECT.~DESTRUCTOR" or + "OBJECT.SCOPE::~DESTRUCTOR. The first operand is the OBJECT. The + second operand (if non-NULL) is the SCOPE. The third operand is + the TYPE node corresponding to the DESTRUCTOR. The type of the + first operand will always be a scalar type. 
+ + The type of a PSEUDO_DTOR_EXPR is always "void", even though it can + be used as if it were a zero-argument function. We handle the + function-call case specially, and giving it "void" type prevents it + being used in expressions in ways that are not permitted. */ +DEFTREECODE (PSEUDO_DTOR_EXPR, "pseudo_dtor_expr", tcc_expression, 3) + +/* A whole bunch of tree codes for the initial, superficial parsing of + templates. */ +DEFTREECODE (MODOP_EXPR, "modop_expr", tcc_expression, 3) +DEFTREECODE (CAST_EXPR, "cast_expr", tcc_unary, 1) +DEFTREECODE (REINTERPRET_CAST_EXPR, "reinterpret_cast_expr", tcc_unary, 1) +DEFTREECODE (CONST_CAST_EXPR, "const_cast_expr", tcc_unary, 1) +DEFTREECODE (STATIC_CAST_EXPR, "static_cast_expr", tcc_unary, 1) +DEFTREECODE (DYNAMIC_CAST_EXPR, "dynamic_cast_expr", tcc_unary, 1) +DEFTREECODE (IMPLICIT_CONV_EXPR, "implicit_conv_expr", tcc_unary, 1) +DEFTREECODE (DOTSTAR_EXPR, "dotstar_expr", tcc_expression, 2) +DEFTREECODE (TYPEID_EXPR, "typeid_expr", tcc_expression, 1) +DEFTREECODE (NOEXCEPT_EXPR, "noexcept_expr", tcc_unary, 1) +DEFTREECODE (SPACESHIP_EXPR, "spaceship_expr", tcc_expression, 2) + +/* A placeholder for an expression that is not type-dependent, but + does occur in a template. When an expression that is not + type-dependent appears in a larger expression, we must compute the + type of that larger expression. That computation would normally + modify the original expression, which would change the mangling of + that expression if it appeared in a template argument list. In + that situation, we create a NON_DEPENDENT_EXPR to take the place of + the original expression. The expression is the only operand -- it + is only needed for diagnostics. */ +DEFTREECODE (NON_DEPENDENT_EXPR, "non_dependent_expr", tcc_expression, 1) + +/* CTOR_INITIALIZER is a placeholder in template code for a call to + setup_vtbl_pointer (and appears in all functions, not just ctors). 
*/ +DEFTREECODE (CTOR_INITIALIZER, "ctor_initializer", tcc_expression, 1) + +DEFTREECODE (TRY_BLOCK, "try_block", tcc_statement, 2) + +DEFTREECODE (EH_SPEC_BLOCK, "eh_spec_block", tcc_statement, 2) + +/* A HANDLER wraps a catch handler for the HANDLER_TYPE. If this is + CATCH_ALL_TYPE, then the handler catches all types. The declaration of + the catch variable is in HANDLER_PARMS, and the body block in + HANDLER_BODY. */ +DEFTREECODE (HANDLER, "handler", tcc_statement, 2) + +/* A MUST_NOT_THROW_EXPR wraps an expression that may not + throw, and must call terminate if it does. The second argument + is a condition, used in templates to express noexcept (condition). */ +DEFTREECODE (MUST_NOT_THROW_EXPR, "must_not_throw_expr", tcc_expression, 2) + +/* A CLEANUP_STMT marks the point at which a declaration is fully + constructed. The CLEANUP_EXPR is run on behalf of CLEANUP_DECL + when CLEANUP_BODY completes. */ +DEFTREECODE (CLEANUP_STMT, "cleanup_stmt", tcc_statement, 3) + +/* Represents an 'if' statement. The operands are IF_COND, + THEN_CLAUSE, and ELSE_CLAUSE, and the current scope, respectively. */ +/* ??? It is currently still necessary to distinguish between IF_STMT + and COND_EXPR for the benefit of templates. */ +DEFTREECODE (IF_STMT, "if_stmt", tcc_statement, 4) + +/* Used to represent a range-based `for' statement. The operands are + RANGE_FOR_DECL, RANGE_FOR_EXPR, RANGE_FOR_BODY, RANGE_FOR_SCOPE, + RANGE_FOR_UNROLL, and RANGE_FOR_INIT_STMT, respectively. Only used in + templates. */ +DEFTREECODE (RANGE_FOR_STMT, "range_for_stmt", tcc_statement, 6) + +/* Used to represent an expression statement. Use `EXPR_STMT_EXPR' to + obtain the expression. */ +DEFTREECODE (EXPR_STMT, "expr_stmt", tcc_expression, 1) + +DEFTREECODE (TAG_DEFN, "tag_defn", tcc_expression, 0) + +/* Represents an 'offsetof' expression during template expansion. 
*/ +DEFTREECODE (OFFSETOF_EXPR, "offsetof_expr", tcc_expression, 2) + +/* Represents an '__builtin_addressof' expression during template + expansion. This is similar to ADDR_EXPR, but it doesn't invoke + overloaded & operators. */ +DEFTREECODE (ADDRESSOF_EXPR, "addressof_expr", tcc_expression, 1) + +/* Represents the -> operator during template expansion. */ +DEFTREECODE (ARROW_EXPR, "arrow_expr", tcc_expression, 1) + +/* Represents an '__alignof__' expression during template + expansion. */ +DEFTREECODE (ALIGNOF_EXPR, "alignof_expr", tcc_expression, 1) + +/* Represents an Objective-C++ '@encode' expression during template + expansion. */ +DEFTREECODE (AT_ENCODE_EXPR, "at_encode_expr", tcc_expression, 1) + +/* A STMT_EXPR represents a statement-expression during template + expansion. This is the GCC extension { ( ... ) }. The + STMT_EXPR_STMT is the statement given by the expression. */ +DEFTREECODE (STMT_EXPR, "stmt_expr", tcc_expression, 1) + +/* Unary plus. Operand 0 is the expression to which the unary plus + is applied. */ +DEFTREECODE (UNARY_PLUS_EXPR, "unary_plus_expr", tcc_unary, 1) + +/** C++11 extensions. */ + +/* A static assertion. This is a C++11 extension. + STATIC_ASSERT_CONDITION contains the condition that is being + checked. STATIC_ASSERT_MESSAGE contains the message (a string + literal) to be displayed if the condition fails to hold. */ +DEFTREECODE (STATIC_ASSERT, "static_assert", tcc_exceptional, 0) + +/* Represents an argument pack of types (or templates). An argument + pack stores zero or more arguments that will be used to instantiate + a parameter pack. + + ARGUMENT_PACK_ARGS retrieves the arguments stored in the argument + pack. + + Example: + template + class tuple { ... }; + + tuple t; + + Values is a (template) parameter pack. When tuple is instantiated, the Values parameter pack is instantiated + with the argument pack . ARGUMENT_PACK_ARGS will + be a TREE_VEC containing int, float, and double. 
*/ +DEFTREECODE (TYPE_ARGUMENT_PACK, "type_argument_pack", tcc_type, 0) + +/* Represents an argument pack of values, which can be used either for + non-type template arguments or function call arguments. + + NONTYPE_ARGUMENT_PACK plays precisely the same role as + TYPE_ARGUMENT_PACK, but will be used for packing non-type template + arguments (e.g., "int... Dimensions") or function arguments ("const + Args&... args"). */ +DEFTREECODE (NONTYPE_ARGUMENT_PACK, "nontype_argument_pack", tcc_expression, 1) + +/* Represents a type expression that will be expanded into a list of + types when instantiated with one or more argument packs. + + PACK_EXPANSION_PATTERN retrieves the expansion pattern. This is + the type or expression that we will substitute into with each + argument in an argument pack. + + SET_PACK_EXPANSION_PATTERN sets the expansion pattern. + + PACK_EXPANSION_PARAMETER_PACKS contains a TREE_LIST of the parameter + packs that are used in this pack expansion. + + Example: + template + struct tied : tuple { + // ... + }; + + The derivation from tuple contains a TYPE_PACK_EXPANSION for the + template arguments. Its PACK_EXPANSION_PATTERN is "Values&" and its + PACK_EXPANSION_PARAMETER_PACKS will contain "Values". */ +DEFTREECODE (TYPE_PACK_EXPANSION, "type_pack_expansion", tcc_type, 0) + +/* Represents an expression that will be expanded into a list of + expressions when instantiated with one or more argument packs. + + EXPR_PACK_EXPANSION plays precisely the same role as TYPE_PACK_EXPANSION, + but will be used for expressions. */ +DEFTREECODE (EXPR_PACK_EXPANSION, "expr_pack_expansion", tcc_expression, 3) + +/* Selects the Ith parameter out of an argument pack. This node will + be used when instantiating pack expansions; see + tsubst_pack_expansion. + + ARGUMENT_PACK_SELECT_FROM_PACK contains the *_ARGUMENT_PACK node + from which the argument will be selected. 
+ + ARGUMENT_PACK_SELECT_INDEX contains the index into the argument + pack that will be returned by this ARGUMENT_PACK_SELECT node. The + index is a machine integer. */ +DEFTREECODE (ARGUMENT_PACK_SELECT, "argument_pack_select", tcc_exceptional, 0) + +/* Fold expressions allow the expansion of a template argument pack + over a binary operator. + + FOLD_EXPR_MOD_P is true when the fold operation is a compound assignment + operator. + + FOLD_EXPR_OP is an INTEGER_CST storing the tree code for the folded + expression. Note that when FOLDEXPR_MOD_P is true, the operator is + a compound assignment operator for that kind of expression. + + FOLD_EXPR_PACK is an expression containing an unexpanded parameter pack; + when expanded, each term becomes an argument of the folded expression. + + In a BINARY_FOLD_EXPRESSION, FOLD_EXPR_INIT is the non-pack argument. */ +DEFTREECODE (UNARY_LEFT_FOLD_EXPR, "unary_left_fold_expr", tcc_expression, 2) +DEFTREECODE (UNARY_RIGHT_FOLD_EXPR, "unary_right_fold_expr", tcc_expression, 2) +DEFTREECODE (BINARY_LEFT_FOLD_EXPR, "binary_left_fold_expr", tcc_expression, 3) +DEFTREECODE (BINARY_RIGHT_FOLD_EXPR, "binary_right_fold_expr", tcc_expression, 3) + +/* Represents the __builtin_bit_cast (type, expr) expression. + The type is in TREE_TYPE, expression in TREE_OPERAND (bitcast, 0). */ +DEFTREECODE (BIT_CAST_EXPR, "bit_cast_expr", tcc_expression, 1) + +/** C++ extensions. */ + +/* Represents a trait expression during template expansion. */ +DEFTREECODE (TRAIT_EXPR, "trait_expr", tcc_exceptional, 0) + +/* A lambda expression. This is a C++0x extension. + LAMBDA_EXPR_DEFAULT_CAPTURE_MODE is an enum for the default, which may be + none. + LAMBDA_EXPR_CAPTURE_LIST holds the capture-list, including `this'. + LAMBDA_EXPR_THIS_CAPTURE goes straight to the capture of `this', if it exists. + LAMBDA_EXPR_PENDING_PROXIES is a vector of capture proxies which need to + be pushed once scope returns to the lambda. 
+ LAMBDA_EXPR_MUTABLE_P signals whether this lambda was declared mutable. */ +DEFTREECODE (LAMBDA_EXPR, "lambda_expr", tcc_exceptional, 0) + +/* The declared type of an expression. This is a C++0x extension. + DECLTYPE_TYPE_EXPR is the expression whose type we are computing. + DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P states whether the + expression was parsed as an id-expression or a member access + expression. When false, it was parsed as a full expression. + DECLTYPE_FOR_LAMBDA_CAPTURE is set if we want lambda capture semantics. + DECLTYPE_FOR_LAMBDA_RETURN is set if we want lambda return deduction. */ +DEFTREECODE (DECLTYPE_TYPE, "decltype_type", tcc_type, 0) + +/* A type designated by `__underlying_type (type)'. + UNDERLYING_TYPE_TYPE is the type in question. */ +DEFTREECODE (UNDERLYING_TYPE, "underlying_type", tcc_type, 0) + +/* A type designated by one of the bases type traits. + BASES_TYPE is the type in question. */ +DEFTREECODE (BASES, "bases", tcc_type, 0) + +/* Dependent operator expressions are given this type rather than a NULL_TREE + type so that we have somewhere to stash the result of phase 1 name lookup + (namely into DEPENDENT_OPERATOR_TYPE_SAVED_LOOKUPS). */ +DEFTREECODE (DEPENDENT_OPERATOR_TYPE, "dependent_operator_type", tcc_type, 0) + +/* Used to represent the template information stored by template + specializations. + The accessors are: + TI_TEMPLATE the template declaration associated to the specialization + TI_ARGS the arguments of the template specialization + TI_TYPEDEFS_NEEDING_ACCESS_CHECKING the vector of typedefs used in + the pattern of the template for which access check is needed at template + instantiation time. */ +DEFTREECODE (TEMPLATE_INFO, "template_info", tcc_exceptional, 0) + +/* OpenMP - #pragma omp depobj + Operand 0: OMP_DEPOBJ_DEPOBJ: Depobj expression + Operand 1: OMP_DEPOBJ_CLAUSES: List of clauses. */ +DEFTREECODE (OMP_DEPOBJ, "omp_depobj", tcc_statement, 2) + +/* Extensions for Concepts. */ + +/* Concept definition. 
This is not entirely different than a VAR_DECL + except that a) it must be a template, and b) doesn't have the wide + range of value and linkage options available to variables. */ +DEFTREECODE (CONCEPT_DECL, "concept_decl", tcc_declaration, 0) + +/* Used to represent information associated with constrained declarations. */ +DEFTREECODE (CONSTRAINT_INFO, "constraint_info", tcc_exceptional, 0) + +/* A wildcard declaration is a placeholder for a template parameter + used to resolve constrained-type-names in concepts. During + resolution, the matching argument is saved as the TREE_TYPE + of the wildcard. */ +DEFTREECODE (WILDCARD_DECL, "wildcard_decl", tcc_declaration, 0) + +/* A requires-expr has three operands. The first operand is + its parameter list (possibly NULL). The second is a list of + requirements, which are denoted by the _REQ* tree codes + below. The third is a TREE_VEC of template arguments to + be applied when substituting into the parameter list and + requirements, set by tsubst_requires_expr for partial instantiations. */ +DEFTREECODE (REQUIRES_EXPR, "requires_expr", tcc_expression, 3) + +/* A requirement for an expression. */ +DEFTREECODE (SIMPLE_REQ, "simple_req", tcc_expression, 1) + +/* A requirement for a type. */ +DEFTREECODE (TYPE_REQ, "type_req", tcc_expression, 1) + +/* A requirement for an expression and its properties. The + first operand is the expression, and the 2nd is its type. + The accessor COMPOUND_REQ_NOEXCEPT determines whether + the noexcept keyword was present. */ +DEFTREECODE (COMPOUND_REQ, "compound_req", tcc_expression, 2) + +/* A requires clause within a requires expression. */ +DEFTREECODE (NESTED_REQ, "nested_req", tcc_expression, 1) + +/* Constraints are modeled as kinds of expressions. + The operands of a constraint can be either types or expressions. + Unlike expressions, constraints do not have a type. */ + +/* An atomic constraint evaluates an expression E. The operand of the + constraint is its parameter mapping. 
The actual expression is stored + in the context. + + ATOMIC_CONSTR_INFO provides source info to support diagnostics. + ATOMIC_CONSTR_EXPR has the expression to be evaluated. + ATOMIC_CONSTR_PARMS is the parameter mapping for the atomic constraint + and is stored in the type field. */ +DEFTREECODE (ATOMIC_CONSTR, "atomic_constr", tcc_expression, 1) + +/* The conjunction and disjunction of two constraints, respectively. + Operands are accessed using TREE_OPERAND. The third operand provides + source info for diagnostics. + + CONJ_CONSTR_INFO and DISJ_CONSTR_INFO provide access to the source + information of constraints, which is stored in the TREE_TYPE. */ +DEFTREECODE (CONJ_CONSTR, "conj_constr", tcc_expression, 2) +DEFTREECODE (DISJ_CONSTR, "disj_constr", tcc_expression, 2) + +/* A check constraint represents the checking of a concept + C. It has two operands: the template defining the concept + and a sequence of template arguments. + + CHECK_CONSTR_CONCEPT has the concept definition + CHECK_CONSTR_ARGUMENTS are the template arguments */ +DEFTREECODE (CHECK_CONSTR, "check_constr", tcc_expression, 2) + +/* The co_await expression is used to support coroutines. + + Op 0 is the cast expresssion (potentially modified by the + promise "await_transform()" method). + Op1 is a proxy for the temp / coro frame slot 'e' value. + Op2 is the initialiser for Op1 (Op0, potentially modified by any + applicable 'co_await' operator). + Op3 is a vector of the [0] e.ready, [1] e.suspend and [2] e.resume calls. + Op4 is a mode : 0 (await) 1 (yield) 2 (initial) 3 (final) */ +DEFTREECODE (CO_AWAIT_EXPR, "co_await", tcc_expression, 5) + +/* The co_yield expression is used to support coroutines. + + Op0 is the original expr (for use in diagnostics) + Op2 is the co_await derived from this. */ +DEFTREECODE (CO_YIELD_EXPR, "co_yield", tcc_expression, 2) + +/* The co_return expression is used to support coroutines. 
+ + Op0 is the original expr, can be void (for use in diagnostics) + Op1 is the promise return_xxxx call for for the expression given. */ + +DEFTREECODE (CO_RETURN_EXPR, "co_return", tcc_statement, 2) + +/* +Local variables: +mode:c +End: +*/ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cp-tree.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cp-tree.h new file mode 100644 index 0000000..72f4398 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cp-tree.h @@ -0,0 +1,8706 @@ +/* Definitions for -*- C++ -*- parsing and type checking. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + Contributed by Michael Tiemann (tiemann@cygnus.com) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CP_TREE_H +#define GCC_CP_TREE_H + +#include "tm.h" +#include "hard-reg-set.h" +#include "function.h" + +/* In order for the format checking to accept the C++ front end + diagnostic framework extensions, you must include this file before + diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE + in c-common.h. */ +#undef GCC_DIAG_STYLE +#define GCC_DIAG_STYLE __gcc_cxxdiag__ +#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) +#error \ +In order for the format checking to accept the C++ front end diagnostic \ +framework extensions, you must include this file before diagnostic-core.h and \ +c-common.h, not after. 
+#endif +#include "c-family/c-common.h" +#include "diagnostic.h" + +/* A tree node, together with a location, so that we can track locations + (and ranges) during parsing. + + The location is redundant for node kinds that have locations, + but not all node kinds do (e.g. constants, and references to + params, locals, etc), so we stash a copy here. */ + +extern location_t cp_expr_location (const_tree); + +class cp_expr +{ +public: + cp_expr () : + m_value (NULL), m_loc (UNKNOWN_LOCATION) {} + + cp_expr (tree value) : + m_value (value), m_loc (cp_expr_location (m_value)) {} + + cp_expr (tree value, location_t loc): + m_value (value), m_loc (loc) + { + protected_set_expr_location (value, loc); + } + + /* Implicit conversions to tree. */ + operator tree () const { return m_value; } + tree & operator* () { return m_value; } + tree operator* () const { return m_value; } + tree & operator-> () { return m_value; } + tree operator-> () const { return m_value; } + + tree get_value () const { return m_value; } + location_t get_location () const { return m_loc; } + location_t get_start () const + { + source_range src_range = get_range_from_loc (line_table, m_loc); + return src_range.m_start; + } + location_t get_finish () const + { + source_range src_range = get_range_from_loc (line_table, m_loc); + return src_range.m_finish; + } + + void set_location (location_t loc) + { + protected_set_expr_location (m_value, loc); + m_loc = loc; + } + + void set_range (location_t start, location_t finish) + { + set_location (make_location (m_loc, start, finish)); + } + + cp_expr& maybe_add_location_wrapper () + { + m_value = maybe_wrap_with_location (m_value, m_loc); + return *this; + } + + private: + tree m_value; + location_t m_loc; +}; + +inline bool +operator == (const cp_expr &lhs, tree rhs) +{ + return lhs.get_value () == rhs; +} + + +enum cp_tree_index +{ + CPTI_WCHAR_DECL, + CPTI_VTABLE_ENTRY_TYPE, + CPTI_DELTA_TYPE, + CPTI_VTABLE_INDEX_TYPE, + CPTI_CLEANUP_TYPE, + 
CPTI_VTT_PARM_TYPE, + + CPTI_CLASS_TYPE, + CPTI_UNKNOWN_TYPE, + CPTI_INIT_LIST_TYPE, + CPTI_EXPLICIT_VOID_LIST, + CPTI_VTBL_TYPE, + CPTI_VTBL_PTR_TYPE, + CPTI_GLOBAL, + CPTI_ABORT_FNDECL, + CPTI_AGGR_TAG, + CPTI_CONV_OP_MARKER, + + CPTI_CTOR_IDENTIFIER, + CPTI_COMPLETE_CTOR_IDENTIFIER, + CPTI_BASE_CTOR_IDENTIFIER, + CPTI_DTOR_IDENTIFIER, + CPTI_COMPLETE_DTOR_IDENTIFIER, + CPTI_BASE_DTOR_IDENTIFIER, + CPTI_DELETING_DTOR_IDENTIFIER, + CPTI_CONV_OP_IDENTIFIER, + CPTI_DELTA_IDENTIFIER, + CPTI_IN_CHARGE_IDENTIFIER, + CPTI_VTT_PARM_IDENTIFIER, + CPTI_AS_BASE_IDENTIFIER, + CPTI_THIS_IDENTIFIER, + CPTI_PFN_IDENTIFIER, + CPTI_VPTR_IDENTIFIER, + CPTI_GLOBAL_IDENTIFIER, + CPTI_ANON_IDENTIFIER, + CPTI_AUTO_IDENTIFIER, + CPTI_DECLTYPE_AUTO_IDENTIFIER, + CPTI_INIT_LIST_IDENTIFIER, + CPTI_FOR_RANGE__IDENTIFIER, + CPTI_FOR_BEGIN__IDENTIFIER, + CPTI_FOR_END__IDENTIFIER, + CPTI_FOR_RANGE_IDENTIFIER, + CPTI_FOR_BEGIN_IDENTIFIER, + CPTI_FOR_END_IDENTIFIER, + CPTI_ABI_TAG_IDENTIFIER, + CPTI_ALIGNED_IDENTIFIER, + CPTI_BEGIN_IDENTIFIER, + CPTI_END_IDENTIFIER, + CPTI_GET_IDENTIFIER, + CPTI_GNU_IDENTIFIER, + CPTI_TUPLE_ELEMENT_IDENTIFIER, + CPTI_TUPLE_SIZE_IDENTIFIER, + CPTI_TYPE_IDENTIFIER, + CPTI_VALUE_IDENTIFIER, + CPTI_FUN_IDENTIFIER, + CPTI_CLOSURE_IDENTIFIER, + CPTI_HEAP_UNINIT_IDENTIFIER, + CPTI_HEAP_IDENTIFIER, + CPTI_HEAP_DELETED_IDENTIFIER, + CPTI_HEAP_VEC_UNINIT_IDENTIFIER, + CPTI_HEAP_VEC_IDENTIFIER, + CPTI_OMP_IDENTIFIER, + + CPTI_LANG_NAME_C, + CPTI_LANG_NAME_CPLUSPLUS, + + CPTI_EMPTY_EXCEPT_SPEC, + CPTI_NOEXCEPT_TRUE_SPEC, + CPTI_NOEXCEPT_FALSE_SPEC, + CPTI_NOEXCEPT_DEFERRED_SPEC, + + CPTI_NULLPTR, + CPTI_NULLPTR_TYPE, + + CPTI_ANY_TARG, + + CPTI_MODULE_HWM, + /* Nodes after here change during compilation, or should not be in + the module's global tree table. Such nodes must be locatable + via name lookup or type-construction, as those are the only + cross-TU matching capabilities remaining. */ + + /* We must find these via the global namespace. 
*/ + CPTI_STD, + CPTI_ABI, + + /* These are created at init time, but the library/headers provide + definitions. */ + CPTI_ALIGN_TYPE, + CPTI_TERMINATE_FN, + CPTI_CALL_UNEXPECTED_FN, + + /* These are lazily inited. */ + CPTI_CONST_TYPE_INFO_TYPE, + CPTI_GET_EXCEPTION_PTR_FN, + CPTI_BEGIN_CATCH_FN, + CPTI_END_CATCH_FN, + CPTI_ALLOCATE_EXCEPTION_FN, + CPTI_FREE_EXCEPTION_FN, + CPTI_THROW_FN, + CPTI_RETHROW_FN, + CPTI_ATEXIT_FN_PTR_TYPE, + CPTI_ATEXIT, + CPTI_DSO_HANDLE, + CPTI_DCAST, + + CPTI_SOURCE_LOCATION_IMPL, + + CPTI_FALLBACK_DFLOAT32_TYPE, + CPTI_FALLBACK_DFLOAT64_TYPE, + CPTI_FALLBACK_DFLOAT128_TYPE, + + CPTI_MAX +}; + +extern GTY(()) tree cp_global_trees[CPTI_MAX]; + +#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL] +#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE] +/* The type used to represent an offset by which to adjust the `this' + pointer in pointer-to-member types. */ +#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE] +/* The type used to represent an index into the vtable. 
*/ +#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE] + +#define class_type_node cp_global_trees[CPTI_CLASS_TYPE] +#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE] +#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE] +#define explicit_void_list_node cp_global_trees[CPTI_EXPLICIT_VOID_LIST] +#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE] +#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE] +#define std_node cp_global_trees[CPTI_STD] +#define abi_node cp_global_trees[CPTI_ABI] +#define global_namespace cp_global_trees[CPTI_GLOBAL] +#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE] +#define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER] +#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL] +#define current_aggr cp_global_trees[CPTI_AGGR_TAG] +#define nullptr_node cp_global_trees[CPTI_NULLPTR] +#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE] +/* std::align_val_t */ +#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE] + +/* We cache these tree nodes so as to call get_identifier less frequently. + For identifiers for functions, including special member functions such + as ctors and assignment operators, the nodes can be used (among other + things) to iterate over their overloads defined by/for a type. For + example: + + tree ovlid = assign_op_identifier; + tree overloads = get_class_binding (type, ovlid); + for (ovl_iterator it (overloads); it; ++it) { ... } + + iterates over the set of implicitly and explicitly defined overloads + of the assignment operator for type (including the copy and move + assignment operators, whether deleted or not). */ + +/* The name of a constructor that takes an in-charge parameter to + decide whether or not to construct virtual base classes. */ +#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER] +/* The name of a constructor that constructs virtual base classes. 
*/ +#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER] +/* The name of a constructor that does not construct virtual base classes. */ +#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER] +/* The name of a destructor that takes an in-charge parameter to + decide whether or not to destroy virtual base classes and whether + or not to delete the object. */ +#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER] +/* The name of a destructor that destroys virtual base classes. */ +#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER] +/* The name of a destructor that does not destroy virtual base + classes. */ +#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER] +/* The name of a destructor that destroys virtual base classes, and + then deletes the entire object. */ +#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER] + +/* The name used for conversion operators -- but note that actual + conversion functions use special identifiers outside the identifier + table. */ +#define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER] + +#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER] +#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER] +/* The name of the parameter that contains a pointer to the VTT to use + for this subobject constructor or destructor. */ +#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER] +#define as_base_identifier cp_global_trees[CPTI_AS_BASE_IDENTIFIER] +#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER] +#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER] +#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER] +/* The name of the ::, std & anon namespaces. */ +#define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER] +#define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER] +/* auto and declspec(auto) identifiers. 
*/ +#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER] +#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER] +#define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER] +#define for_range__identifier cp_global_trees[CPTI_FOR_RANGE__IDENTIFIER] +#define for_begin__identifier cp_global_trees[CPTI_FOR_BEGIN__IDENTIFIER] +#define for_end__identifier cp_global_trees[CPTI_FOR_END__IDENTIFIER] +#define for_range_identifier cp_global_trees[CPTI_FOR_RANGE_IDENTIFIER] +#define for_begin_identifier cp_global_trees[CPTI_FOR_BEGIN_IDENTIFIER] +#define for_end_identifier cp_global_trees[CPTI_FOR_END_IDENTIFIER] +#define abi_tag_identifier cp_global_trees[CPTI_ABI_TAG_IDENTIFIER] +#define aligned_identifier cp_global_trees[CPTI_ALIGNED_IDENTIFIER] +#define begin_identifier cp_global_trees[CPTI_BEGIN_IDENTIFIER] +#define end_identifier cp_global_trees[CPTI_END_IDENTIFIER] +#define get__identifier cp_global_trees[CPTI_GET_IDENTIFIER] +#define gnu_identifier cp_global_trees[CPTI_GNU_IDENTIFIER] +#define tuple_element_identifier cp_global_trees[CPTI_TUPLE_ELEMENT_IDENTIFIER] +#define tuple_size_identifier cp_global_trees[CPTI_TUPLE_SIZE_IDENTIFIER] +#define type_identifier cp_global_trees[CPTI_TYPE_IDENTIFIER] +#define value_identifier cp_global_trees[CPTI_VALUE_IDENTIFIER] +#define fun_identifier cp_global_trees[CPTI_FUN_IDENTIFIER] +#define closure_identifier cp_global_trees[CPTI_CLOSURE_IDENTIFIER] +#define heap_uninit_identifier cp_global_trees[CPTI_HEAP_UNINIT_IDENTIFIER] +#define heap_identifier cp_global_trees[CPTI_HEAP_IDENTIFIER] +#define heap_deleted_identifier cp_global_trees[CPTI_HEAP_DELETED_IDENTIFIER] +#define heap_vec_uninit_identifier cp_global_trees[CPTI_HEAP_VEC_UNINIT_IDENTIFIER] +#define heap_vec_identifier cp_global_trees[CPTI_HEAP_VEC_IDENTIFIER] +#define omp_identifier cp_global_trees[CPTI_OMP_IDENTIFIER] +#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C] +#define lang_name_cplusplus 
cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS] + +/* Exception specifiers used for throw(), noexcept(true), + noexcept(false) and deferred noexcept. We rely on these being + uncloned. */ +#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC] +#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC] +#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC] +#define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC] + +/* Exception handling function declarations. */ +#define terminate_fn cp_global_trees[CPTI_TERMINATE_FN] +#define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN] +#define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN] +#define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN] +#define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN] +#define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN] +#define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN] +#define throw_fn cp_global_trees[CPTI_THROW_FN] +#define rethrow_fn cp_global_trees[CPTI_RETHROW_FN] + +/* The type of the function-pointer argument to "__cxa_atexit" (or + "std::atexit", if "__cxa_atexit" is not being used). */ +#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE] + +/* A pointer to `std::atexit'. */ +#define atexit_node cp_global_trees[CPTI_ATEXIT] + +/* A pointer to `__dso_handle'. */ +#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE] + +/* The declaration of the dynamic_cast runtime. */ +#define dynamic_cast_node cp_global_trees[CPTI_DCAST] + +/* The type of a destructor. */ +#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE] + +/* The type of the vtt parameter passed to subobject constructors and + destructors. */ +#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE] + +/* A node which matches any template argument. */ +#define any_targ_node cp_global_trees[CPTI_ANY_TARG] + +/* std::source_location::__impl class. 
*/ +#define source_location_impl cp_global_trees[CPTI_SOURCE_LOCATION_IMPL] + +/* Node to indicate default access. This must be distinct from the + access nodes in tree.h. */ + +#define access_default_node null_node + +/* Variant of dfloat{32,64,128}_type_node only used for fundamental + rtti purposes if DFP is disabled. */ +#define fallback_dfloat32_type cp_global_trees[CPTI_FALLBACK_DFLOAT32_TYPE] +#define fallback_dfloat64_type cp_global_trees[CPTI_FALLBACK_DFLOAT64_TYPE] +#define fallback_dfloat128_type cp_global_trees[CPTI_FALLBACK_DFLOAT128_TYPE] + + +#include "name-lookup.h" + +/* Usage of TREE_LANG_FLAG_?: + 0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE) + NEW_EXPR_USE_GLOBAL (in NEW_EXPR). + COND_EXPR_IS_VEC_DELETE (in COND_EXPR). + DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). + CLEANUP_P (in TRY_BLOCK) + AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR) + PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF) + PAREN_STRING_LITERAL_P (in STRING_CST) + CP_DECL_THREAD_LOCAL_P (in VAR_DECL) + KOENIG_LOOKUP_P (in CALL_EXPR) + STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST). 
+ EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) + STMT_EXPR_NO_SCOPE (in STMT_EXPR) + BIND_EXPR_TRY_BLOCK (in BIND_EXPR) + TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) + OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE, + and OMP_TASKLOOP) + BASELINK_QUALIFIED_P (in BASELINK) + TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) + TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX) + ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute) + ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag) + LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST) + PARENTHESIZED_LIST_P (in the TREE_LIST for a parameter-declaration-list) + CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR) + LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR) + DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE) + VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR) + DECL_OVERRIDE_P (in FUNCTION_DECL) + IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR) + TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR) + CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR) + PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION) + TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO) + SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR) + COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ) + WILDCARD_PACK_P (in WILDCARD_DECL) + BLOCK_OUTER_CURLY_BRACE_P (in BLOCK) + FOLD_EXPR_MODOP_P (*_FOLD_EXPR) + IF_STMT_CONSTEXPR_P (IF_STMT) + DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL) + SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT) + REINTERPRET_CAST_P (in NOP_EXPR) + ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR) + OVL_DEDUP_P (in OVERLOAD) + ATOMIC_CONSTR_MAP_INSTANTIATED_P (in ATOMIC_CONSTR) + 1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE) + TI_PENDING_TEMPLATE_FLAG. + TEMPLATE_PARMS_FOR_INLINE. + DELETE_EXPR_USE_VEC (in DELETE_EXPR). + (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out). 
+ ICS_ELLIPSIS_FLAG (in _CONV) + DECL_INITIALIZED_P (in VAR_DECL) + TYPENAME_IS_CLASS_P (in TYPENAME_TYPE) + STMT_IS_FULL_EXPR_P (in _STMT) + TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR) + LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR) + DECL_FINAL_P (in FUNCTION_DECL) + QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF) + CONSTRUCTOR_IS_DEPENDENT (in CONSTRUCTOR) + TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO) + PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION) + OVL_USING_P (in OVERLOAD) + IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR) + BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (in BASELINK) + BIND_EXPR_VEC_DTOR (in BIND_EXPR) + ATOMIC_CONSTR_EXPR_FROM_CONCEPT_P (in ATOMIC_CONSTR) + 2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE) + ICS_THIS_FLAG (in _CONV) + DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) + STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST) + TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE) + TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR) + FNDECL_USED_AUTO (in FUNCTION_DECL) + DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE) + REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF, + VIEW_CONVERT_EXPR, PAREN_EXPR) + AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR) + CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR) + OVL_HIDDEN_P (in OVERLOAD) + IF_STMT_CONSTEVAL_P (in IF_STMT) + SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT) + LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR) + IMPLICIT_CONV_EXPR_BRACED_INIT (in IMPLICIT_CONV_EXPR) + PACK_EXPANSION_AUTO_P (in *_PACK_EXPANSION) + 3: IMPLICIT_RVALUE_P (in NON_LVALUE_EXPR or STATIC_CAST_EXPR) + ICS_BAD_FLAG (in _CONV) + FN_TRY_BLOCK_P (in TRY_BLOCK) + BIND_EXPR_BODY_BLOCK (in BIND_EXPR) + CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) + DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE) + CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR) + OVL_NESTED_P (in OVERLOAD) + DECL_MODULE_EXPORT_P (in _DECL) + PACK_EXPANSION_FORCE_EXTRA_ARGS_P (in *_PACK_EXPANSION) + 4: IDENTIFIER_MARKED (IDENTIFIER_NODEs) + TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, 
CONSTRUCTOR, + CALL_EXPR, or FIELD_DECL). + DECL_TINFO_P (in VAR_DECL, TYPE_DECL) + FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) + OVL_LOOKUP_P (in OVERLOAD) + LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, ENUMERAL_TYPE, NAMESPACE_DECL) + FNDECL_MANIFESTLY_CONST_EVALUATED (in FUNCTION_DECL) + 5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) + FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) + CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) + CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR) + OVL_EXPORT_P (in OVERLOAD) + 6: TYPE_MARKED_P (in _TYPE) + DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL) + RANGE_FOR_IVDEP (in RANGE_FOR_STMT) + CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR) + CONSTRUCTOR_IS_DESIGNATED_INIT (in CONSTRUCTOR) + + Usage of TYPE_LANG_FLAG_?: + 0: TYPE_DEPENDENT_P + 1: TYPE_HAS_USER_CONSTRUCTOR. + 2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE) + TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE) + 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR + 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) + ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) + AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM) + 6: TYPE_DEPENDENT_P_VALID + + Usage of DECL_LANG_FLAG_?: + 0: DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) + DECL_LOCAL_DECL_P (in FUNCTION_DECL, VAR_DECL) + DECL_MUTABLE_P (in FIELD_DECL) + DECL_DEPENDENT_P (in USING_DECL) + LABEL_DECL_BREAK (in LABEL_DECL) + 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). 
+ DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) + DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) + USING_DECL_TYPENAME_P (in USING_DECL) + DECL_VLA_CAPTURE_P (in FIELD_DECL) + DECL_ARRAY_PARAMETER_P (in PARM_DECL) + LABEL_DECL_CONTINUE (in LABEL_DECL) + 2: DECL_THIS_EXTERN (in VAR_DECL, FUNCTION_DECL or PARM_DECL) + DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) + DECL_CONSTRAINT_VAR_P (in a PARM_DECL) + TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL) + DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL) + LABEL_DECL_CDTOR (in LABEL_DECL) + USING_DECL_UNRELATED_P (in USING_DECL) + 3: DECL_IN_AGGR_P. + 4: DECL_C_BIT_FIELD (in a FIELD_DECL) + DECL_ANON_UNION_VAR_P (in a VAR_DECL) + DECL_SELF_REFERENCE_P (in a TYPE_DECL) + DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) + DECL_UNINSTANIATED_TEMPLATE_FRIEND_P (in TEMPLATE_DECL) + 5: DECL_INTERFACE_KNOWN. + 6: DECL_THIS_STATIC (in VAR_DECL, FUNCTION_DECL or PARM_DECL) + DECL_FIELD_IS_BASE (in FIELD_DECL) + TYPE_DECL_ALIAS_P (in TYPE_DECL) + 7: DECL_THUNK_P (in a member FUNCTION_DECL) + DECL_NORMAL_CAPTURE_P (in FIELD_DECL) + DECL_DECLARED_CONSTINIT_P (in VAR_DECL) + 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) + + Usage of language-independent fields in a language-dependent manner: + + TYPE_ALIAS_SET + This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so + forth as a substitute for the mark bits provided in `lang_type'. + At present, only the six low-order bits are used. + + TYPE_LANG_SLOT_1 + For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS. + For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE. + For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE, + RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO, + + BINFO_VIRTUALS + For a binfo, this is a TREE_LIST. There is an entry for each + virtual function declared either in BINFO or its direct and + indirect primary bases. 
+ + The BV_DELTA of each node gives the amount by which to adjust the + `this' pointer when calling the function. If the method is an + overridden version of a base class method, then it is assumed + that, prior to adjustment, the this pointer points to an object + of the base class. + + The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable + index of the vcall offset for this entry. + + The BV_FN is the declaration for the virtual function itself. + + If BV_LOST_PRIMARY is set, it means that this entry is for a lost + primary virtual base and can be left null in the vtable. + + BINFO_VTABLE + This is an expression with POINTER_TYPE that gives the value + to which the vptr should be initialized. Use get_vtbl_decl_for_binfo + to extract the VAR_DECL for the complete vtable. + + DECL_VINDEX + This field is NULL for a non-virtual function. For a virtual + function, it is eventually set to an INTEGER_CST indicating the + index in the vtable at which this function can be found. When + a virtual function is declared, but before it is known what + function is overridden, this field is the error_mark_node. + + Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is + the virtual function this one overrides, and whose TREE_CHAIN is + the old DECL_VINDEX. */ + +/* Language-specific tree checkers. 
*/ + +#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ + TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) + +#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \ + TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL) + +#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \ + (TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \ + || TREE_CODE (NODE) == FUNCTION_DECL) + +#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ + TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) + +#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ + TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) + +#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \ + TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) + +#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ + TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) + +#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) + +/* Returns t iff the node can have a TEMPLATE_INFO field. */ + +inline tree +template_info_decl_check (const_tree t, const char* f, int l, const char* fn) +{ + switch (TREE_CODE (t)) + { + case VAR_DECL: + case FUNCTION_DECL: + case FIELD_DECL: + case TYPE_DECL: + case CONCEPT_DECL: + case TEMPLATE_DECL: + return const_cast(t); + default: + break; + } + tree_check_failed (t, f, l, fn, + VAR_DECL, FUNCTION_DECL, FIELD_DECL, TYPE_DECL, + CONCEPT_DECL, TEMPLATE_DECL, 0); + gcc_unreachable (); +} + +#define TEMPLATE_INFO_DECL_CHECK(NODE) \ + template_info_decl_check ((NODE), __FILE__, __LINE__, __FUNCTION__) + +#define THUNK_FUNCTION_CHECK(NODE) __extension__ \ +({ __typeof (NODE) const __t = (NODE); \ + if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ + || !__t->decl_common.lang_specific->u.fn.thunk_p) \ + tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ + __t; }) + +#else /* ENABLE_TREE_CHECKING */ + +#define TEMPLATE_INFO_DECL_CHECK(NODE) (NODE) +#define THUNK_FUNCTION_CHECK(NODE) (NODE) + +#endif /* ENABLE_TREE_CHECKING */ + +/* 
Language-dependent contents of an identifier. */ + +struct GTY(()) lang_identifier { + struct c_common_identifier c_common; + cxx_binding *bindings; +}; + +/* Return a typed pointer version of T if it designates a + C++ front-end identifier. */ +inline lang_identifier* +identifier_p (tree t) +{ + if (TREE_CODE (t) == IDENTIFIER_NODE) + return (lang_identifier*) t; + return NULL; +} + +#define LANG_IDENTIFIER_CAST(NODE) \ + ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE)) + +struct GTY(()) template_parm_index { + struct tree_common common; + int index; + int level; + int orig_level; + tree decl; +}; + +struct GTY(()) ptrmem_cst { + struct tree_common common; + tree member; + location_t locus; +}; +typedef struct ptrmem_cst * ptrmem_cst_t; + +#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE)) + +#define BIND_EXPR_TRY_BLOCK(NODE) \ + TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE)) + +/* This BIND_EXPR is from build_vec_delete_1. */ +#define BIND_EXPR_VEC_DTOR(NODE) \ + TREE_LANG_FLAG_1 (BIND_EXPR_CHECK (NODE)) + +/* Used to mark the block around the member initializers and cleanups. */ +#define BIND_EXPR_BODY_BLOCK(NODE) \ + TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE)) +#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \ + (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \ + || LAMBDA_FUNCTION_P (NODE)) + +#define STATEMENT_LIST_NO_SCOPE(NODE) \ + TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE)) +#define STATEMENT_LIST_TRY_BLOCK(NODE) \ + TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE)) + +/* Mark the outer curly brace BLOCK. */ +#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE)) + +/* Nonzero if this statement should be considered a full-expression, + i.e., if temporaries created during this statement should have + their destructors run at the end of this statement. */ +#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) + +/* Marks the result of a statement expression. 
*/ +#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \ + TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE)) + +/* Nonzero if this statement-expression does not have an associated scope. */ +#define STMT_EXPR_NO_SCOPE(NODE) \ + TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) + +#define COND_EXPR_IS_VEC_DELETE(NODE) \ + TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE)) + +/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions + are not constexprs. Other NOP_EXPRs are. */ +#define REINTERPRET_CAST_P(NODE) \ + TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE)) + +/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual + sense of `same'. */ +#define same_type_p(TYPE1, TYPE2) \ + comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) + +/* Returns nonzero iff NODE is a declaration for the global function + `main'. */ +#define DECL_MAIN_P(NODE) \ + (DECL_EXTERN_C_FUNCTION_P (NODE) \ + && DECL_NAME (NODE) != NULL_TREE \ + && MAIN_NAME_P (DECL_NAME (NODE)) \ + && flag_hosted) + +/* Lookup walker marking. */ +#define LOOKUP_SEEN_P(NODE) TREE_VISITED (NODE) +#define LOOKUP_FOUND_P(NODE) \ + TREE_LANG_FLAG_4 (TREE_CHECK4 (NODE,RECORD_TYPE,UNION_TYPE,ENUMERAL_TYPE,\ + NAMESPACE_DECL)) + +/* These two accessors should only be used by OVL manipulators. + Other users should use iterators and convenience functions. */ +#define OVL_FUNCTION(NODE) \ + (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function) +#define OVL_CHAIN(NODE) \ + (((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain) + +/* If set, this or a subsequent overload contains decls that need deduping. */ +#define OVL_DEDUP_P(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE)) +/* If set, this was imported in a using declaration. */ +#define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE)) +/* If set, this overload is a hidden decl. */ +#define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE)) +/* If set, this overload contains a nested overload. 
*/ +#define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE)) +/* If set, this overload was constructed during lookup. */ +#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE)) +/* If set, this OVL_USING_P overload is exported. */ +#define OVL_EXPORT_P(NODE) TREE_LANG_FLAG_5 (OVERLOAD_CHECK (NODE)) + +/* The first decl of an overload. */ +#define OVL_FIRST(NODE) ovl_first (NODE) +/* The name of the overload set. */ +#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE)) + +/* Whether this is a set of overloaded functions. TEMPLATE_DECLS are + always wrapped in an OVERLOAD, so we don't need to check them + here. */ +#define OVL_P(NODE) \ + (TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD) +/* Whether this is a single member overload. */ +#define OVL_SINGLE_P(NODE) \ + (TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE)) + +/* OVL_HIDDEN_P nodes come before other nodes. */ + +struct GTY(()) tree_overload { + struct tree_common common; + tree function; +}; + +/* Iterator for a 1 dimensional overload. Permits iterating over the + outer level of a 2-d overload when explicitly enabled. */ + +class ovl_iterator { + tree ovl; + const bool allow_inner; /* Only used when checking. */ + + public: + explicit ovl_iterator (tree o, bool allow = false) + : ovl (o), allow_inner (allow) + { + } + + public: + operator bool () const + { + return ovl; + } + ovl_iterator &operator++ () + { + ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl); + return *this; + } + tree operator* () const + { + tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl); + + /* Check this is not an unexpected 2-dimensional overload. 
*/ + gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD); + + return fn; + } + bool operator== (const ovl_iterator &o) const + { + return ovl == o.ovl; + } + tree get_using () const + { + gcc_checking_assert (using_p ()); + return ovl; + } + + public: + /* Whether this overload was introduced by a using decl. */ + bool using_p () const + { + return (TREE_CODE (ovl) == USING_DECL + || (TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl))); + } + /* Whether this using is being exported. */ + bool exporting_p () const + { + return OVL_EXPORT_P (get_using ()); + } + + bool hidden_p () const + { + return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl); + } + + public: + tree remove_node (tree head) + { + return remove_node (head, ovl); + } + tree reveal_node (tree head) + { + return reveal_node (head, ovl); + } + + protected: + /* If we have a nested overload, point at the inner overload and + return the next link on the outer one. */ + tree maybe_push () + { + tree r = NULL_TREE; + + if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl)) + { + r = OVL_CHAIN (ovl); + ovl = OVL_FUNCTION (ovl); + } + return r; + } + /* Restore an outer nested overload. */ + void pop (tree outer) + { + gcc_checking_assert (!ovl); + ovl = outer; + } + + private: + /* We make these static functions to avoid the address of the + iterator escaping the local context. */ + static tree remove_node (tree head, tree node); + static tree reveal_node (tree ovl, tree node); +}; + +/* Treat a tree as a range of ovl_iterator, e.g. + for (tree f : ovl_range (fns)) { ... } */ + +class ovl_range +{ + tree t; + bool allow; +public: + explicit ovl_range (tree t, bool allow = false): t(t), allow(allow) { } + ovl_iterator begin() { return ovl_iterator (t, allow); } + ovl_iterator end() { return ovl_iterator (NULL_TREE, allow); } +}; + +/* Iterator over a (potentially) 2 dimensional overload, which is + produced by name lookup. 
*/ + +class lkp_iterator : public ovl_iterator { + typedef ovl_iterator parent; + + tree outer; + + public: + explicit lkp_iterator (tree o) + : parent (o, true), outer (maybe_push ()) + { + } + + public: + lkp_iterator &operator++ () + { + bool repush = !outer; + + if (!parent::operator++ () && !repush) + { + pop (outer); + repush = true; + } + + if (repush) + outer = maybe_push (); + + return *this; + } +}; + +/* Treat a tree as a range of lkp_iterator, e.g. + for (tree f : lkp_range (fns)) { ... } */ + +class lkp_range +{ + tree t; +public: + lkp_range (tree t): t(t) { } + lkp_iterator begin() { return lkp_iterator (t); } + lkp_iterator end() { return lkp_iterator (NULL_TREE); } +}; + +/* hash traits for declarations. Hashes potential overload sets via + DECL_NAME. */ + +struct named_decl_hash : ggc_remove { + typedef tree value_type; /* A DECL or OVERLOAD */ + typedef tree compare_type; /* An identifier. */ + + inline static hashval_t hash (const value_type decl); + inline static bool equal (const value_type existing, compare_type candidate); + + static const bool empty_zero_p = true; + static inline void mark_empty (value_type &p) {p = NULL_TREE;} + static inline bool is_empty (value_type p) {return !p;} + + /* Nothing is deletable. Everything is insertable. */ + static bool is_deleted (value_type) { return false; } + static void mark_deleted (value_type) { gcc_unreachable (); } +}; + +/* Simplified unique_ptr clone to release a tree vec on exit. */ + +class releasing_vec +{ +public: + typedef vec vec_t; + + releasing_vec (vec_t *v): v(v) { } + releasing_vec (): v(make_tree_vector ()) { } + + /* Copy ops are deliberately declared but not defined, + copies must always be elided. 
*/ + releasing_vec (const releasing_vec &); + releasing_vec &operator= (const releasing_vec &); + + vec_t &operator* () const { return *v; } + vec_t *operator-> () const { return v; } + vec_t *get() const { return v; } + operator vec_t *() const { return v; } + vec_t ** operator& () { return &v; } + + /* Breaks pointer/value consistency for convenience. This takes ptrdiff_t + rather than unsigned to avoid ambiguity with the built-in operator[] + (bootstrap/91828). */ + tree& operator[] (ptrdiff_t i) const { return (*v)[i]; } + + tree *begin() { return ::begin (v); } + tree *end() { return ::end (v); } + + void release () { release_tree_vector (v); v = NULL; } + + ~releasing_vec () { release_tree_vector (v); } +private: + vec_t *v; +}; +/* Forwarding functions for vec_safe_* that might reallocate. */ +inline tree* vec_safe_push (releasing_vec& r, const tree &t CXX_MEM_STAT_INFO) +{ return vec_safe_push (*&r, t PASS_MEM_STAT); } +inline bool vec_safe_reserve (releasing_vec& r, unsigned n, bool e = false CXX_MEM_STAT_INFO) +{ return vec_safe_reserve (*&r, n, e PASS_MEM_STAT); } +inline unsigned vec_safe_length (releasing_vec &r) +{ return r->length(); } +inline void vec_safe_splice (releasing_vec &r, vec *p CXX_MEM_STAT_INFO) +{ vec_safe_splice (*&r, p PASS_MEM_STAT); } +void release_tree_vector (releasing_vec &); // cause link error + +struct GTY(()) tree_template_decl { + struct tree_decl_common common; + tree arguments; + tree result; +}; + +/* Returns true iff NODE is a BASELINK. */ +#define BASELINK_P(NODE) \ + (TREE_CODE (NODE) == BASELINK) +/* The BINFO indicating the base in which lookup found the + BASELINK_FUNCTIONS. */ +#define BASELINK_BINFO(NODE) \ + (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo) +/* The functions referred to by the BASELINK; either a FUNCTION_DECL, + a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. 
*/ +#define BASELINK_FUNCTIONS(NODE) \ + (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions) +/* If T is a BASELINK, grab the functions, otherwise just T, which is + expected to already be a (list of) functions. */ +#define MAYBE_BASELINK_FUNCTIONS(T) \ + (BASELINK_P (T) ? BASELINK_FUNCTIONS (T) : T) +/* The BINFO in which the search for the functions indicated by this baselink + began. This base is used to determine the accessibility of functions + selected by overload resolution. */ +#define BASELINK_ACCESS_BINFO(NODE) \ + (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) +/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type + to which the conversion should occur. This value is important if + the BASELINK_FUNCTIONS include a template conversion operator -- + the BASELINK_OPTYPE can be used to determine what type the user + requested. */ +#define BASELINK_OPTYPE(NODE) \ + (TREE_CHAIN (BASELINK_CHECK (NODE))) +/* Nonzero if this baselink was from a qualified lookup. */ +#define BASELINK_QUALIFIED_P(NODE) \ + TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) +/* Nonzero if the overload set for this baselink might be incomplete due + to the lookup being performed from an incomplete-class context. */ +#define BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P(NODE) \ + TREE_LANG_FLAG_1 (BASELINK_CHECK (NODE)) + +struct GTY(()) tree_baselink { + struct tree_common common; + tree binfo; + tree functions; + tree access_binfo; +}; + +/* The different kinds of ids that we encounter. */ + +enum cp_id_kind +{ + /* Not an id at all. */ + CP_ID_KIND_NONE, + /* An unqualified-id that is not a template-id. */ + CP_ID_KIND_UNQUALIFIED, + /* An unqualified-id that is a dependent name. */ + CP_ID_KIND_UNQUALIFIED_DEPENDENT, + /* An unqualified template-id. */ + CP_ID_KIND_TEMPLATE_ID, + /* A qualified-id. */ + CP_ID_KIND_QUALIFIED +}; + + +/* The various kinds of C++0x warnings we encounter. 
*/ + +enum cpp0x_warn_str +{ + /* extended initializer lists */ + CPP0X_INITIALIZER_LISTS, + /* explicit conversion operators */ + CPP0X_EXPLICIT_CONVERSION, + /* variadic templates */ + CPP0X_VARIADIC_TEMPLATES, + /* lambda expressions */ + CPP0X_LAMBDA_EXPR, + /* C++0x auto */ + CPP0X_AUTO, + /* scoped enums */ + CPP0X_SCOPED_ENUMS, + /* defaulted and deleted functions */ + CPP0X_DEFAULTED_DELETED, + /* inline namespaces */ + CPP0X_INLINE_NAMESPACES, + /* override controls, override/final */ + CPP0X_OVERRIDE_CONTROLS, + /* non-static data member initializers */ + CPP0X_NSDMI, + /* user defined literals */ + CPP0X_USER_DEFINED_LITERALS, + /* delegating constructors */ + CPP0X_DELEGATING_CTORS, + /* inheriting constructors */ + CPP0X_INHERITING_CTORS, + /* C++11 attributes */ + CPP0X_ATTRIBUTES, + /* ref-qualified member functions */ + CPP0X_REF_QUALIFIER +}; + +/* The various kinds of operation used by composite_pointer_type. */ + +enum composite_pointer_operation +{ + /* comparison */ + CPO_COMPARISON, + /* conversion */ + CPO_CONVERSION, + /* conditional expression */ + CPO_CONDITIONAL_EXPR +}; + +/* Possible cases of expression list used by build_x_compound_expr_from_list. */ +enum expr_list_kind { + ELK_INIT, /* initializer */ + ELK_MEM_INIT, /* member initializer */ + ELK_FUNC_CAST /* functional cast */ +}; + +/* Possible cases of implicit bad rhs conversions. */ +enum impl_conv_rhs { + ICR_DEFAULT_ARGUMENT, /* default argument */ + ICR_CONVERTING, /* converting */ + ICR_INIT, /* initialization */ + ICR_ARGPASS, /* argument passing */ + ICR_RETURN, /* return */ + ICR_ASSIGN /* assignment */ +}; + +/* Possible cases of implicit or explicit bad conversions to void. 
*/ +enum impl_conv_void { + ICV_CAST, /* (explicit) conversion to void */ + ICV_SECOND_OF_COND, /* second operand of conditional expression */ + ICV_THIRD_OF_COND, /* third operand of conditional expression */ + ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ + ICV_LEFT_OF_COMMA, /* left operand of comma operator */ + ICV_STATEMENT, /* statement */ + ICV_THIRD_IN_FOR /* for increment expression */ +}; + +/* Possible invalid uses of an abstract class that might not have a + specific associated declaration. */ +enum GTY(()) abstract_class_use { + ACU_UNKNOWN, /* unknown or decl provided */ + ACU_CAST, /* cast to abstract class */ + ACU_NEW, /* new-expression of abstract class */ + ACU_THROW, /* throw-expression of abstract class */ + ACU_CATCH, /* catch-parameter of abstract class */ + ACU_ARRAY, /* array of abstract class */ + ACU_RETURN, /* return type of abstract class */ + ACU_PARM /* parameter type of abstract class */ +}; + +/* Macros for access to language-specific slots in an identifier. */ + +/* Identifiers map directly to block or class-scope bindings. + Namespace-scope bindings are held in hash tables on the respective + namespaces. The identifier bindings are the innermost active + binding, from whence you can get the decl and/or implicit-typedef + of an elaborated type. When not bound to a local entity the + values are NULL. */ +#define IDENTIFIER_BINDING(NODE) \ + (LANG_IDENTIFIER_CAST (NODE)->bindings) +#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) +#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) + +/* Kinds of identifiers. Values are carefully chosen. */ +enum cp_identifier_kind { + cik_normal = 0, /* Not a special identifier. */ + cik_keyword = 1, /* A keyword. */ + cik_ctor = 2, /* Constructor (in-chg, complete or base). */ + cik_dtor = 3, /* Destructor (in-chg, deleting, complete or + base). */ + cik_simple_op = 4, /* Non-assignment operator name. */ + cik_assign_op = 5, /* An assignment operator name. 
*/ + cik_conv_op = 6, /* Conversion operator name. */ + cik_reserved_for_udlit = 7, /* Not yet in use */ + cik_max +}; + +/* Kind bits. */ +#define IDENTIFIER_KIND_BIT_0(NODE) \ + TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE)) +#define IDENTIFIER_KIND_BIT_1(NODE) \ + TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE)) +#define IDENTIFIER_KIND_BIT_2(NODE) \ + TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE)) + +/* Used by various search routines. */ +#define IDENTIFIER_MARKED(NODE) \ + TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE)) + +/* Nonzero if this identifier is used as a virtual function name somewhere + (optimizes searches). */ +#define IDENTIFIER_VIRTUAL_P(NODE) \ + TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE)) + +/* True if this identifier is a reserved word. C_RID_CODE (node) is + then the RID_* value of the keyword. Value 1. */ +#define IDENTIFIER_KEYWORD_P(NODE) \ + ((!IDENTIFIER_KIND_BIT_2 (NODE)) \ + & (!IDENTIFIER_KIND_BIT_1 (NODE)) \ + & IDENTIFIER_KIND_BIT_0 (NODE)) + +/* True if this identifier is the name of a constructor or + destructor. Value 2 or 3. */ +#define IDENTIFIER_CDTOR_P(NODE) \ + ((!IDENTIFIER_KIND_BIT_2 (NODE)) \ + & IDENTIFIER_KIND_BIT_1 (NODE)) + +/* True if this identifier is the name of a constructor. Value 2. */ +#define IDENTIFIER_CTOR_P(NODE) \ + (IDENTIFIER_CDTOR_P(NODE) \ + & (!IDENTIFIER_KIND_BIT_0 (NODE))) + +/* True if this identifier is the name of a destructor. Value 3. */ +#define IDENTIFIER_DTOR_P(NODE) \ + (IDENTIFIER_CDTOR_P(NODE) \ + & IDENTIFIER_KIND_BIT_0 (NODE)) + +/* True if this identifier is for any operator name (including + conversions). Value 4, 5, 6 or 7. */ +#define IDENTIFIER_ANY_OP_P(NODE) \ + (IDENTIFIER_KIND_BIT_2 (NODE)) + +/* True if this identifier is for an overloaded operator. Values 4, 5. */ +#define IDENTIFIER_OVL_OP_P(NODE) \ + (IDENTIFIER_ANY_OP_P (NODE) \ + & (!IDENTIFIER_KIND_BIT_1 (NODE))) + +/* True if this identifier is for any assignment. Values 5. 
*/ +#define IDENTIFIER_ASSIGN_OP_P(NODE) \ + (IDENTIFIER_OVL_OP_P (NODE) \ + & IDENTIFIER_KIND_BIT_0 (NODE)) + +/* True if this identifier is the name of a type-conversion + operator. Value 7. */ +#define IDENTIFIER_CONV_OP_P(NODE) \ + (IDENTIFIER_ANY_OP_P (NODE) \ + & IDENTIFIER_KIND_BIT_1 (NODE) \ + & (!IDENTIFIER_KIND_BIT_0 (NODE))) + +/* True if this identifier is a new or delete operator. */ +#define IDENTIFIER_NEWDEL_OP_P(NODE) \ + (IDENTIFIER_OVL_OP_P (NODE) \ + && IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC) + +/* True if this identifier is a new operator. */ +#define IDENTIFIER_NEW_OP_P(NODE) \ + (IDENTIFIER_OVL_OP_P (NODE) \ + && (IDENTIFIER_OVL_OP_FLAGS (NODE) \ + & (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC) + +/* Access a C++-specific index for identifier NODE. + Used to optimize operator mappings etc. */ +#define IDENTIFIER_CP_INDEX(NODE) \ + (IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space) + +/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ +#define C_TYPE_FIELDS_READONLY(TYPE) \ + (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) + +/* The tokens stored in the unparsed operand. 
*/ + +#define DEFPARSE_TOKENS(NODE) \ + (((struct tree_deferred_parse *)DEFERRED_PARSE_CHECK (NODE))->tokens) +#define DEFPARSE_INSTANTIATIONS(NODE) \ + (((struct tree_deferred_parse *)DEFERRED_PARSE_CHECK (NODE))->instantiations) + +struct GTY (()) tree_deferred_parse { + struct tree_base base; + struct cp_token_cache *tokens; + vec *instantiations; +}; + + +#define DEFERRED_NOEXCEPT_PATTERN(NODE) \ + (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern) +#define DEFERRED_NOEXCEPT_ARGS(NODE) \ + (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args) +#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \ + ((NODE) && (TREE_PURPOSE (NODE)) \ + && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT)) +#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \ + (DEFERRED_NOEXCEPT_SPEC_P (NODE) \ + && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE) +#define UNPARSED_NOEXCEPT_SPEC_P(NODE) \ + ((NODE) && (TREE_PURPOSE (NODE)) \ + && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_PARSE)) + +struct GTY (()) tree_deferred_noexcept { + struct tree_base base; + tree pattern; + tree args; +}; + + +/* The condition associated with the static assertion. This must be + an integral constant expression. */ +#define STATIC_ASSERT_CONDITION(NODE) \ + (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition) + +/* The message associated with the static assertion. This must be a + string constant, which will be emitted as an error message when the + static assert condition is false. */ +#define STATIC_ASSERT_MESSAGE(NODE) \ + (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message) + +/* Source location information for a static assertion. 
*/ +#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \ + (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location) + +struct GTY (()) tree_static_assert { + struct tree_common common; + tree condition; + tree message; + location_t location; +}; + +struct GTY (()) tree_argument_pack_select { + struct tree_common common; + tree argument_pack; + int index; +}; + +/* The different kinds of traits that we encounter. */ + +enum cp_trait_kind +{ + CPTK_BASES, + CPTK_DIRECT_BASES, + CPTK_HAS_NOTHROW_ASSIGN, + CPTK_HAS_NOTHROW_CONSTRUCTOR, + CPTK_HAS_NOTHROW_COPY, + CPTK_HAS_TRIVIAL_ASSIGN, + CPTK_HAS_TRIVIAL_CONSTRUCTOR, + CPTK_HAS_TRIVIAL_COPY, + CPTK_HAS_TRIVIAL_DESTRUCTOR, + CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS, + CPTK_HAS_VIRTUAL_DESTRUCTOR, + CPTK_IS_ABSTRACT, + CPTK_IS_AGGREGATE, + CPTK_IS_BASE_OF, + CPTK_IS_CLASS, + CPTK_IS_EMPTY, + CPTK_IS_ENUM, + CPTK_IS_FINAL, + CPTK_IS_LAYOUT_COMPATIBLE, + CPTK_IS_LITERAL_TYPE, + CPTK_IS_POINTER_INTERCONVERTIBLE_BASE_OF, + CPTK_IS_POD, + CPTK_IS_POLYMORPHIC, + CPTK_IS_SAME_AS, + CPTK_IS_STD_LAYOUT, + CPTK_IS_TRIVIAL, + CPTK_IS_TRIVIALLY_ASSIGNABLE, + CPTK_IS_TRIVIALLY_CONSTRUCTIBLE, + CPTK_IS_TRIVIALLY_COPYABLE, + CPTK_IS_UNION, + CPTK_UNDERLYING_TYPE, + CPTK_IS_ASSIGNABLE, + CPTK_IS_CONSTRUCTIBLE, + CPTK_IS_NOTHROW_ASSIGNABLE, + CPTK_IS_NOTHROW_CONSTRUCTIBLE +}; + +/* The types that we are processing. */ +#define TRAIT_EXPR_TYPE1(NODE) \ + (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1) + +#define TRAIT_EXPR_TYPE2(NODE) \ + (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2) + +/* The specific trait that we are processing. 
*/ +#define TRAIT_EXPR_KIND(NODE) \ + (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind) + +#define TRAIT_EXPR_LOCATION(NODE) \ + (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->locus) + +struct GTY (()) tree_trait_expr { + struct tree_common common; + tree type1; + tree type2; + location_t locus; + enum cp_trait_kind kind; +}; + +/* Identifiers used for lambda types are almost anonymous. Use this + spare flag to distinguish them (they also have the anonymous flag). */ +#define IDENTIFIER_LAMBDA_P(NODE) \ + (IDENTIFIER_NODE_CHECK(NODE)->base.protected_flag) + +/* Based off of TYPE_UNNAMED_P. */ +#define LAMBDA_TYPE_P(NODE) \ + (TREE_CODE (NODE) == RECORD_TYPE \ + && TYPE_LINKAGE_IDENTIFIER (NODE) \ + && IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE))) + +/* Test if FUNCTION_DECL is a lambda function. */ +#define LAMBDA_FUNCTION_P(FNDECL) \ + (DECL_DECLARES_FUNCTION_P (FNDECL) \ + && DECL_OVERLOADED_OPERATOR_P (FNDECL) \ + && DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \ + && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL))) + +enum cp_lambda_default_capture_mode_type { + CPLD_NONE, + CPLD_COPY, + CPLD_REFERENCE +}; + +/* The method of default capture, if any. */ +#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode) + +/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL + * so that the name, type, and field are all together, whether or not it has + * been added to the lambda's class type. + TREE_LIST: + TREE_PURPOSE: The FIELD_DECL for this capture. + TREE_VALUE: The initializer. This is part of a GNU extension. */ +#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list) + +/* During parsing of the lambda-introducer, the node in the capture-list + that holds the 'this' capture. During parsing of the body, the + capture proxy for that node. 
*/ +#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture) + +/* Predicate tracking whether `this' is in the effective capture set. */ +#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \ + LAMBDA_EXPR_THIS_CAPTURE(NODE) + +/* Predicate tracking whether the lambda was declared 'mutable'. */ +#define LAMBDA_EXPR_MUTABLE_P(NODE) \ + TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE)) + +/* True iff uses of a const variable capture were optimized away. */ +#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \ + TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE)) + +/* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit + capture. */ +#define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \ + TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) + +/* The source location of the lambda. */ +#define LAMBDA_EXPR_LOCATION(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus) + +/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL, + FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */ +#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope) + +/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */ +#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator) + +/* During parsing of the lambda, a vector of capture proxies which need + to be pushed once we're done processing a nested lambda. */ +#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies) + +/* If NODE was regenerated via tsubst_lambda_expr, this is a TEMPLATE_INFO + whose TI_TEMPLATE is the immediate LAMBDA_EXPR from which NODE was + regenerated, and TI_ARGS is the full set of template arguments used + to regenerate NODE from the most general lambda. 
*/ +#define LAMBDA_EXPR_REGEN_INFO(NODE) \ + (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->regen_info) + +/* The closure type of the lambda, which is also the type of the + LAMBDA_EXPR. */ +#define LAMBDA_EXPR_CLOSURE(NODE) \ + (TREE_TYPE (LAMBDA_EXPR_CHECK (NODE))) + +struct GTY (()) tree_lambda_expr +{ + struct tree_typed typed; + tree capture_list; + tree this_capture; + tree extra_scope; + tree regen_info; + vec *pending_proxies; + location_t locus; + enum cp_lambda_default_capture_mode_type default_capture_mode : 8; + short int discriminator; +}; + +/* Non-zero if this template specialization has access violations that + should be rechecked when the function is instantiated outside argument + deduction. */ +#define TINFO_HAS_ACCESS_ERRORS(NODE) \ + (TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE))) +#define FNDECL_HAS_ACCESS_ERRORS(NODE) \ + (TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE))) + +/* Non-zero if this variable template specialization was specified using a + template-id, so it's a partial or full specialization and not a definition + of the member template of a particular class specialization. */ +#define TINFO_USED_TEMPLATE_ID(NODE) \ + (TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE))) + +/* The representation of a deferred access check. */ + +struct GTY(()) deferred_access_check { + /* The base class in which the declaration is referenced. */ + tree binfo; + /* The declaration whose access must be checked. */ + tree decl; + /* The declaration that should be used in the error message. */ + tree diag_decl; + /* The location of this access. */ + location_t loc; +}; + +struct GTY(()) tree_template_info { + struct tree_base base; + tree tmpl; + tree args; + vec *deferred_access_checks; +}; + +// Constraint information for a C++ declaration. 
Constraint information is +// comprised of: +// +// - a constraint expression introduced by the template header +// - a constraint expression introduced by a function declarator +// - the associated constraints, which are the conjunction of those, +// and used for declaration matching +// +// The template and declarator requirements are kept to support pretty +// printing constrained declarations. +struct GTY(()) tree_constraint_info { + struct tree_base base; + tree template_reqs; + tree declarator_reqs; + tree associated_constr; +}; + +// Require that pointer P is non-null before returning. +template +inline T* +check_nonnull (T* p) +{ + gcc_assert (p); + return p; +} + +/* Returns true iff T is non-null and represents constraint info. */ +inline tree_constraint_info * +check_constraint_info (tree t) +{ + if (t && TREE_CODE (t) == CONSTRAINT_INFO) + return (tree_constraint_info *)t; + return NULL; +} + +/* Access the expression describing the template constraints. This may be + null if no constraints were introduced in the template parameter list, + a requirements clause after the template parameter list, or constraints + through a constrained-type-specifier. */ +#define CI_TEMPLATE_REQS(NODE) \ + check_constraint_info (check_nonnull (NODE))->template_reqs + +/* Access the expression describing the trailing constraints. This is non-null + for any implicit instantiation of a constrained declaration. For a + templated declaration it is non-null only when a trailing requires-clause + was specified. */ +#define CI_DECLARATOR_REQS(NODE) \ + check_constraint_info (check_nonnull (NODE))->declarator_reqs + +/* The computed associated constraint expression for a declaration. */ +#define CI_ASSOCIATED_CONSTRAINTS(NODE) \ + check_constraint_info (check_nonnull (NODE))->associated_constr + +/* Access the constraint-expression introduced by the requires-clause + associate the template parameter list NODE. 
*/ +#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \ + TREE_TYPE (TREE_LIST_CHECK (NODE)) + +/* Access the logical constraints on the template parameter declaration + indicated by NODE. */ +#define TEMPLATE_PARM_CONSTRAINTS(NODE) \ + TREE_TYPE (TREE_LIST_CHECK (NODE)) + +/* Non-zero if the noexcept is present in a compound requirement. */ +#define COMPOUND_REQ_NOEXCEPT_P(NODE) \ + TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ)) + +/* A TREE_LIST whose TREE_VALUE is the constraints on the 'auto' placeholder + type NODE, used in an argument deduction constraint. The TREE_PURPOSE + holds the set of template parameters that were in-scope when this 'auto' + was formed. */ +#define PLACEHOLDER_TYPE_CONSTRAINTS_INFO(NODE) \ + DECL_SIZE_UNIT (TYPE_NAME (NODE)) + +/* The constraints on the 'auto' placeholder type NODE. */ +#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \ + (PLACEHOLDER_TYPE_CONSTRAINTS_INFO (NODE) \ + ? TREE_VALUE (PLACEHOLDER_TYPE_CONSTRAINTS_INFO (NODE)) \ + : NULL_TREE) + +/* True if NODE is a constraint. */ +#define CONSTR_P(NODE) \ + (TREE_CODE (NODE) == ATOMIC_CONSTR \ + || TREE_CODE (NODE) == CONJ_CONSTR \ + || TREE_CODE (NODE) == DISJ_CONSTR) + +/* Valid for any normalized constraint. */ +#define CONSTR_CHECK(NODE) \ + TREE_CHECK3 (NODE, ATOMIC_CONSTR, CONJ_CONSTR, DISJ_CONSTR) + +/* The CONSTR_INFO stores normalization data for a constraint. It refers to + the original expression and the expression or declaration + from which the constraint was normalized. + + This is TREE_LIST whose TREE_PURPOSE is the original expression and whose + TREE_VALUE is a list of contexts. */ +#define CONSTR_INFO(NODE) \ + TREE_TYPE (CONSTR_CHECK (NODE)) + +/* The expression evaluated by the constraint. */ +#define CONSTR_EXPR(NODE) \ + TREE_PURPOSE (CONSTR_INFO (NODE)) + +/* The expression or declaration from which this constraint was normalized. 
+ This is a TREE_LIST whose TREE_VALUE is either a template-id expression + denoting a concept check or the declaration introducing the constraint. + These are chained to other context objects. */ +#define CONSTR_CONTEXT(NODE) \ + TREE_VALUE (CONSTR_INFO (NODE)) + +/* The parameter mapping for an atomic constraint. */ +#define ATOMIC_CONSTR_MAP(NODE) \ + TREE_OPERAND (TREE_CHECK (NODE, ATOMIC_CONSTR), 0) + +/* Whether the parameter mapping of this atomic constraint + is already instantiated with concrete template arguments. + Used only in satisfy_atom and in the satisfaction cache. */ +#define ATOMIC_CONSTR_MAP_INSTANTIATED_P(NODE) \ + TREE_LANG_FLAG_0 (ATOMIC_CONSTR_CHECK (NODE)) + +/* Whether the expression for this atomic constraint belongs to a + concept definition. */ +#define ATOMIC_CONSTR_EXPR_FROM_CONCEPT_P(NODE) \ + TREE_LANG_FLAG_1 (ATOMIC_CONSTR_CHECK (NODE)) + +/* The expression of an atomic constraint. */ +#define ATOMIC_CONSTR_EXPR(NODE) \ + CONSTR_EXPR (ATOMIC_CONSTR_CHECK (NODE)) + +/* The concept of a concept check. */ +#define CHECK_CONSTR_CONCEPT(NODE) \ + TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0) + +/* The template arguments of a concept check. */ +#define CHECK_CONSTR_ARGS(NODE) \ + TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1) + +/* Whether a PARM_DECL represents a local parameter in a + requires-expression. */ +#define CONSTRAINT_VAR_P(NODE) \ + DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL)) + +/* The concept constraining this constrained template-parameter. */ +#define CONSTRAINED_PARM_CONCEPT(NODE) \ + DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE)) +/* Any extra template arguments specified for a constrained + template-parameter. */ +#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \ + DECL_SIZE (TYPE_DECL_CHECK (NODE)) +/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a + prototype for the constrained parameter in finish_shorthand_constraint, + attached for convenience. 
*/ +#define CONSTRAINED_PARM_PROTOTYPE(NODE) \ + DECL_INITIAL (TYPE_DECL_CHECK (NODE)) + +/* Module flags on FUNCTION,VAR,TYPE,CONCEPT or NAMESPACE + A TEMPLATE_DECL holds them on the DECL_TEMPLATE_RESULT object -- + it's just not practical to keep them consistent. */ +#define DECL_MODULE_CHECK(NODE) \ + TREE_NOT_CHECK (NODE, TEMPLATE_DECL) + +/* In the purview of a module (including header unit). */ +#define DECL_MODULE_PURVIEW_P(N) \ + (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (N))->u.base.module_purview_p) + +/* True if the live version of the decl was imported. */ +#define DECL_MODULE_IMPORT_P(NODE) \ + (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (NODE))->u.base.module_import_p) + +/* True if this decl is in the entity hash & array. This means that + some variant was imported, even if DECL_MODULE_IMPORT_P is false. */ +#define DECL_MODULE_ENTITY_P(NODE) \ + (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (NODE))->u.base.module_entity_p) + +/* DECL that has attached decls for ODR-relatedness. */ +#define DECL_MODULE_ATTACHMENTS_P(NODE) \ + (DECL_LANG_SPECIFIC (TREE_CHECK2(NODE,FUNCTION_DECL,VAR_DECL))\ + ->u.base.module_attached_p) + +/* Whether this is an exported DECL. Held on any decl that can appear + at namespace scope (function, var, type, template, const or + namespace). templates copy from their template_result, consts have + it for unscoped enums. */ +#define DECL_MODULE_EXPORT_P(NODE) TREE_LANG_FLAG_3 (NODE) + + +/* The list of local parameters introduced by this requires-expression, + in the form of a chain of PARM_DECLs. */ +#define REQUIRES_EXPR_PARMS(NODE) \ + TREE_OPERAND (TREE_CHECK (NODE, REQUIRES_EXPR), 0) + +/* A TREE_LIST of the requirements for this requires-expression. + The requirements are stored in lexical order within the TREE_VALUE + of each TREE_LIST node. The TREE_PURPOSE of each node is unused. */ +#define REQUIRES_EXPR_REQS(NODE) \ + TREE_OPERAND (TREE_CHECK (NODE, REQUIRES_EXPR), 1) + +/* Like PACK_EXPANSION_EXTRA_ARGS, for requires-expressions. 
*/ +#define REQUIRES_EXPR_EXTRA_ARGS(NODE) \ + TREE_OPERAND (TREE_CHECK (NODE, REQUIRES_EXPR), 2) + +enum cp_tree_node_structure_enum { + TS_CP_GENERIC, + TS_CP_IDENTIFIER, + TS_CP_TPI, + TS_CP_PTRMEM, + TS_CP_OVERLOAD, + TS_CP_BINDING_VECTOR, + TS_CP_BASELINK, + TS_CP_TEMPLATE_DECL, + TS_CP_DEFERRED_PARSE, + TS_CP_DEFERRED_NOEXCEPT, + TS_CP_STATIC_ASSERT, + TS_CP_ARGUMENT_PACK_SELECT, + TS_CP_TRAIT_EXPR, + TS_CP_LAMBDA_EXPR, + TS_CP_TEMPLATE_INFO, + TS_CP_CONSTRAINT_INFO, + TS_CP_USERDEF_LITERAL +}; + +/* The resulting tree type. */ +union GTY((desc ("cp_tree_node_structure (&%h)"), + chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { + union tree_node GTY ((tag ("TS_CP_GENERIC"), + desc ("tree_node_structure (&%h)"))) generic; + struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi; + struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem; + struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload; + struct tree_binding_vec GTY ((tag ("TS_CP_BINDING_VECTOR"))) binding_vec; + struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink; + struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl; + struct tree_deferred_parse GTY ((tag ("TS_CP_DEFERRED_PARSE"))) deferred_parse; + struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept; + struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier; + struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT"))) + static_assertion; + struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT"))) + argument_pack_select; + struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR"))) + trait_expression; + struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR"))) + lambda_expression; + struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO"))) + template_info; + struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO"))) + constraint_info; + struct tree_userdef_literal GTY ((tag 
("TS_CP_USERDEF_LITERAL"))) + userdef_literal; +}; + + +struct GTY(()) omp_declare_target_attr { + bool attr_syntax; +}; + +/* Global state. */ + +struct GTY(()) saved_scope { + vec *old_bindings; + tree old_namespace; + vec *decl_ns_list; + tree class_name; + tree class_type; + tree access_specifier; + tree function_decl; + vec *lang_base; + tree lang_name; + tree template_parms; + cp_binding_level *x_previous_class_level; + tree x_saved_tree; + + /* Only used for uses of this in trailing return type. */ + tree x_current_class_ptr; + tree x_current_class_ref; + + int x_processing_template_decl; + int x_processing_specialization; + int x_processing_constraint; + int suppress_location_wrappers; + BOOL_BITFIELD x_processing_explicit_instantiation : 1; + BOOL_BITFIELD need_pop_function_context : 1; + + /* Nonzero if we are parsing the discarded statement of a constexpr + if-statement. */ + BOOL_BITFIELD discarded_stmt : 1; + /* Nonzero if we are parsing or instantiating the compound-statement + of consteval if statement. Also set while processing an immediate + invocation. */ + BOOL_BITFIELD consteval_if_p : 1; + + int unevaluated_operand; + int inhibit_evaluation_warnings; + int noexcept_operand; + int ref_temp_count; + + struct stmt_tree_s x_stmt_tree; + + cp_binding_level *class_bindings; + cp_binding_level *bindings; + + hash_map *GTY((skip)) x_local_specializations; + vec *omp_declare_target_attribute; + + struct saved_scope *prev; +}; + +extern GTY(()) struct saved_scope *scope_chain; + +/* The current open namespace. */ + +#define current_namespace scope_chain->old_namespace + +/* The stack for namespaces of current declarations. 
*/ + +#define decl_namespace_list scope_chain->decl_ns_list + +/* IDENTIFIER_NODE: name of current class */ + +#define current_class_name scope_chain->class_name + +/* _TYPE: the type of the current class */ + +#define current_class_type scope_chain->class_type + +/* When parsing a class definition, the access specifier most recently + given by the user, or, if no access specifier was given, the + default value appropriate for the kind of class (i.e., struct, + class, or union). */ + +#define current_access_specifier scope_chain->access_specifier + +/* Pointer to the top of the language name stack. */ + +#define current_lang_base scope_chain->lang_base +#define current_lang_name scope_chain->lang_name + +/* When parsing a template declaration, a TREE_LIST represents the + active template parameters. Each node in the list represents one + level of template parameters. The innermost level is first in the + list. The depth of each level is stored as an INTEGER_CST in the + TREE_PURPOSE of each node. The parameters for that level are + stored in the TREE_VALUE. */ + +#define current_template_parms scope_chain->template_parms +#define current_template_depth \ + (current_template_parms ? TMPL_PARMS_DEPTH (current_template_parms) : 0) + +#define processing_template_decl scope_chain->x_processing_template_decl +#define processing_specialization scope_chain->x_processing_specialization +#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation + +#define in_discarded_stmt scope_chain->discarded_stmt +#define in_consteval_if_p scope_chain->consteval_if_p + +#define current_ref_temp_count scope_chain->ref_temp_count + +/* RAII sentinel to handle clearing processing_template_decl and restoring + it when done. 
*/ + +class processing_template_decl_sentinel +{ +public: + int saved; + processing_template_decl_sentinel (bool reset = true) + : saved (processing_template_decl) + { + if (reset) + processing_template_decl = 0; + } + ~processing_template_decl_sentinel() + { + processing_template_decl = saved; + } +}; + +/* RAII sentinel to disable certain warnings during template substitution + and elsewhere. */ + +class warning_sentinel +{ +public: + int &flag; + int val; + warning_sentinel(int& flag, bool suppress=true) + : flag(flag), val(flag) { if (suppress) flag = 0; } + ~warning_sentinel() { flag = val; } +}; + +/* RAII sentinel to temporarily override input_location. This will not set + input_location to UNKNOWN_LOCATION or BUILTINS_LOCATION. */ + +class iloc_sentinel +{ + location_t saved_loc; +public: + iloc_sentinel (location_t loc): saved_loc (input_location) + { + if (loc >= RESERVED_LOCATION_COUNT) + input_location = loc; + } + ~iloc_sentinel () + { + input_location = saved_loc; + } +}; + +/* RAII sentinel that saves the value of a variable, optionally + overrides it right away, and restores its value when the sentinel + id destructed. */ + +template +class temp_override +{ + T& overridden_variable; + T saved_value; +public: + temp_override(T& var) : overridden_variable (var), saved_value (var) {} + temp_override(T& var, T overrider) + : overridden_variable (var), saved_value (var) + { + overridden_variable = overrider; + } + ~temp_override() { overridden_variable = saved_value; } +}; + +/* Wrapping a template parameter in type_identity_t hides it from template + argument deduction. */ +#if __cpp_lib_type_identity +using std::type_identity_t; +#else +template +struct type_identity { typedef T type; }; +template +using type_identity_t = typename type_identity::type; +#endif + +/* Object generator function for temp_override, so you don't need to write the + type of the object as a template argument. 
+ + Use as auto x = make_temp_override (flag); */ + +template +inline temp_override +make_temp_override (T& var) +{ + return { var }; +} + +/* Likewise, but use as auto x = make_temp_override (flag, value); */ + +template +inline temp_override +make_temp_override (T& var, type_identity_t overrider) +{ + return { var, overrider }; +} + +/* The cached class binding level, from the most recently exited + class, or NULL if none. */ + +#define previous_class_level scope_chain->x_previous_class_level + +/* A map from local variable declarations in the body of the template + presently being instantiated to the corresponding instantiated + local variables. */ + +#define local_specializations scope_chain->x_local_specializations + +/* Nonzero if we are parsing the operand of a noexcept operator. */ + +#define cp_noexcept_operand scope_chain->noexcept_operand + +struct named_label_entry; /* Defined in decl.cc. */ + +struct named_label_hash : ggc_remove +{ + typedef named_label_entry *value_type; + typedef tree compare_type; /* An identifier. */ + + inline static hashval_t hash (value_type); + inline static bool equal (const value_type, compare_type); + + static const bool empty_zero_p = true; + inline static void mark_empty (value_type &p) {p = NULL;} + inline static bool is_empty (value_type p) {return !p;} + + /* Nothing is deletable. Everything is insertable. */ + inline static bool is_deleted (value_type) { return false; } + inline static void mark_deleted (value_type) { gcc_unreachable (); } +}; + +/* Global state pertinent to the current function. 
*/ + +struct GTY(()) language_function { + struct c_language_function base; + + tree x_cdtor_label; + tree x_current_class_ptr; + tree x_current_class_ref; + tree x_eh_spec_block; + tree x_in_charge_parm; + tree x_vtt_parm; + tree x_return_value; + + BOOL_BITFIELD returns_value : 1; + BOOL_BITFIELD returns_null : 1; + BOOL_BITFIELD returns_abnormally : 1; + BOOL_BITFIELD infinite_loop: 1; + BOOL_BITFIELD x_in_function_try_handler : 1; + BOOL_BITFIELD x_in_base_initializer : 1; + + /* True if this function can throw an exception. */ + BOOL_BITFIELD can_throw : 1; + + BOOL_BITFIELD invalid_constexpr : 1; + BOOL_BITFIELD throwing_cleanup : 1; + + hash_table *x_named_labels; + + cp_binding_level *bindings; + + /* Tracking possibly infinite loops. This is a vec only because + vec doesn't work with gtype. */ + vec *infinite_loops; +}; + +/* The current C++-specific per-function global variables. */ + +#define cp_function_chain (cfun->language) + +/* In a constructor destructor, the point at which all derived class + destroying/construction has been done. I.e., just before a + constructor returns, or before any base class destroying will be done + in a destructor. */ + +#define cdtor_label cp_function_chain->x_cdtor_label + +/* When we're processing a member function, current_class_ptr is the + PARM_DECL for the `this' pointer. The current_class_ref is an + expression for `*this'. */ + +#define current_class_ptr \ + (*(cfun && cp_function_chain \ + ? &cp_function_chain->x_current_class_ptr \ + : &scope_chain->x_current_class_ptr)) +#define current_class_ref \ + (*(cfun && cp_function_chain \ + ? &cp_function_chain->x_current_class_ref \ + : &scope_chain->x_current_class_ref)) + +/* The EH_SPEC_BLOCK for the exception-specifiers for the current + function, if any. */ + +#define current_eh_spec_block cp_function_chain->x_eh_spec_block + +/* The `__in_chrg' parameter for the current function. Only used for + constructors and destructors. 
*/ + +#define current_in_charge_parm cp_function_chain->x_in_charge_parm + +/* The `__vtt_parm' parameter for the current function. Only used for + constructors and destructors. */ + +#define current_vtt_parm cp_function_chain->x_vtt_parm + +/* A boolean flag to control whether we need to clean up the return value if a + local destructor throws. Only used in functions that return by value a + class with a destructor. Which 'tors don't, so we can use the same + field as current_vtt_parm. */ + +#define current_retval_sentinel current_vtt_parm + +/* Set to 0 at beginning of a function definition, set to 1 if + a return statement that specifies a return value is seen. */ + +#define current_function_returns_value cp_function_chain->returns_value + +/* Set to 0 at beginning of a function definition, set to 1 if + a return statement with no argument is seen. */ + +#define current_function_returns_null cp_function_chain->returns_null + +/* Set to 0 at beginning of a function definition, set to 1 if + a call to a noreturn function is seen. */ + +#define current_function_returns_abnormally \ + cp_function_chain->returns_abnormally + +/* Set to 0 at beginning of a function definition, set to 1 if we see an + obvious infinite loop. This can have false positives and false + negatives, so it should only be used as a heuristic. */ + +#define current_function_infinite_loop cp_function_chain->infinite_loop + +/* Nonzero if we are processing a base initializer. Zero elsewhere. */ +#define in_base_initializer cp_function_chain->x_in_base_initializer + +#define in_function_try_handler cp_function_chain->x_in_function_try_handler + +/* Expression always returned from function, or error_mark_node + otherwise, for use by the automatic named return value optimization. */ + +#define current_function_return_value \ + (cp_function_chain->x_return_value) + +/* In parser.cc. */ +extern tree cp_literal_operator_id (const char *); + +#define NON_ERROR(NODE) ((NODE) == error_mark_node ? 
NULL_TREE : (NODE)) + +/* TRUE if a tree code represents a statement. */ +extern bool statement_code_p[MAX_TREE_CODES]; + +#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)] + +enum languages { lang_c, lang_cplusplus }; + +/* Macros to make error reporting functions' lives easier. */ +#define TYPE_LINKAGE_IDENTIFIER(NODE) \ + (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE))) +#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE))) +#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE))) + +/* Any kind of anonymous type. */ +#define TYPE_ANON_P(NODE) \ + (TYPE_LINKAGE_IDENTIFIER (NODE) \ + && IDENTIFIER_ANON_P (TYPE_LINKAGE_IDENTIFIER (NODE))) + +/* Nonzero if NODE, a TYPE, has no name for linkage purposes. */ +#define TYPE_UNNAMED_P(NODE) \ + (TYPE_ANON_P (NODE) \ + && !IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE))) + +/* The _DECL for this _TYPE. */ +#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE))) + +/* Nonzero if T is a type that could resolve to any kind of concrete type + at instantiation time. */ +#define WILDCARD_TYPE_P(T) \ + (TREE_CODE (T) == TEMPLATE_TYPE_PARM \ + || TREE_CODE (T) == TYPENAME_TYPE \ + || TREE_CODE (T) == TYPEOF_TYPE \ + || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \ + || TREE_CODE (T) == DECLTYPE_TYPE \ + || TREE_CODE (T) == DEPENDENT_OPERATOR_TYPE) + +/* Nonzero if T is a class (or struct or union) type. Also nonzero + for template type parameters, typename types, and instantiated + template template parameters. Keep these checks in ascending code + order. */ +#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T)) + +/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or + union type. */ +#define SET_CLASS_TYPE_P(T, VAL) \ + (TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL)) + +/* Nonzero if T is a class type. Zero for template type parameters, + typename types, and so forth. 
*/ +#define CLASS_TYPE_P(T) \ + (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T)) + +/* Nonzero if T is a class type but not a union. */ +#define NON_UNION_CLASS_TYPE_P(T) \ + (TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T)) + +/* Keep these checks in ascending code order. */ +#define RECORD_OR_UNION_CODE_P(T) \ + ((T) == RECORD_TYPE || (T) == UNION_TYPE) +#define OVERLOAD_TYPE_P(T) \ + (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE) + +/* True if this type is dependent. This predicate is only valid if + TYPE_DEPENDENT_P_VALID is true. */ +#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE) + +/* True if dependent_type_p has been called for this type, with the + result that TYPE_DEPENDENT_P is valid. */ +#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE) + +/* Nonzero if this type is const-qualified. */ +#define CP_TYPE_CONST_P(NODE) \ + ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0) + +/* Nonzero if this type is volatile-qualified. */ +#define CP_TYPE_VOLATILE_P(NODE) \ + ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0) + +/* Nonzero if this type is restrict-qualified. */ +#define CP_TYPE_RESTRICT_P(NODE) \ + ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0) + +/* Nonzero if this type is const-qualified, but not + volatile-qualified. Other qualifiers are ignored. This macro is + used to test whether or not it is OK to bind an rvalue to a + reference. */ +#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \ + ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \ + == TYPE_QUAL_CONST) + +#define FUNCTION_ARG_CHAIN(NODE) \ + TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE))) + +/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES + which refers to a user-written parameter. */ +#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \ + skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE))) + +/* Similarly, but for DECL_ARGUMENTS. 
*/ +#define FUNCTION_FIRST_USER_PARM(NODE) \ + skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE)) + +/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and + ambiguity issues. */ +#define DERIVED_FROM_P(PARENT, TYPE) \ + (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE) + +/* Gives the visibility specification for a class type. */ +#define CLASSTYPE_VISIBILITY(TYPE) \ + DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE)) +#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \ + DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE)) + +struct GTY (()) tree_pair_s { + tree purpose; + tree value; +}; +typedef tree_pair_s *tree_pair_p; + +/* This structure provides additional information above and beyond + what is provide in the ordinary tree_type. In the past, we used it + for the types of class types, template parameters types, typename + types, and so forth. However, there can be many (tens to hundreds + of thousands) of template parameter types in a compilation, and + there's no need for this additional information in that case. + Therefore, we now use this data structure only for class types. + + In the past, it was thought that there would be relatively few + class types. However, in the presence of heavy use of templates, + many (i.e., thousands) of classes can easily be generated. + Therefore, we should endeavor to keep the size of this structure to + a minimum. 
*/ +struct GTY(()) lang_type { + unsigned char align; + + unsigned has_type_conversion : 1; + unsigned has_copy_ctor : 1; + unsigned has_default_ctor : 1; + unsigned const_needs_init : 1; + unsigned ref_needs_init : 1; + unsigned has_const_copy_assign : 1; + unsigned use_template : 2; + + unsigned has_mutable : 1; + unsigned com_interface : 1; + unsigned non_pod_class : 1; + unsigned nearly_empty_p : 1; + unsigned user_align : 1; + unsigned has_copy_assign : 1; + unsigned has_new : 1; + unsigned has_array_new : 1; + + unsigned gets_delete : 2; + unsigned interface_only : 1; + unsigned interface_unknown : 1; + unsigned contains_empty_class_p : 1; + unsigned anon_aggr : 1; + unsigned non_zero_init : 1; + unsigned empty_p : 1; + /* 32 bits allocated. */ + + unsigned vec_new_uses_cookie : 1; + unsigned declared_class : 1; + unsigned diamond_shaped : 1; + unsigned repeated_base : 1; + unsigned being_defined : 1; + unsigned debug_requested : 1; + unsigned fields_readonly : 1; + unsigned ptrmemfunc_flag : 1; + + unsigned lazy_default_ctor : 1; + unsigned lazy_copy_ctor : 1; + unsigned lazy_copy_assign : 1; + unsigned lazy_destructor : 1; + unsigned has_const_copy_ctor : 1; + unsigned has_complex_copy_ctor : 1; + unsigned has_complex_copy_assign : 1; + unsigned non_aggregate : 1; + + unsigned has_complex_dflt : 1; + unsigned has_list_ctor : 1; + unsigned non_std_layout : 1; + unsigned is_literal : 1; + unsigned lazy_move_ctor : 1; + unsigned lazy_move_assign : 1; + unsigned has_complex_move_ctor : 1; + unsigned has_complex_move_assign : 1; + + unsigned has_constexpr_ctor : 1; + unsigned unique_obj_representations : 1; + unsigned unique_obj_representations_set : 1; + bool erroneous : 1; + bool non_pod_aggregate : 1; + + /* When adding a flag here, consider whether or not it ought to + apply to a template instance if it applies to the template. If + so, make sure to copy it in instantiate_class_template! */ + + /* There are some bits left to fill out a 32-bit word. 
Keep track + of this by updating the size of this bitfield whenever you add or + remove a flag. */ + unsigned dummy : 3; + + tree primary_base; + vec *vcall_indices; + tree vtables; + tree typeinfo_var; + vec *vbases; + tree as_base; + vec *pure_virtuals; + tree friend_classes; + vec * GTY((reorder ("resort_type_member_vec"))) members; + tree key_method; + tree decl_list; + tree befriending_classes; + /* In a RECORD_TYPE, information specific to Objective-C++, such + as a list of adopted protocols or a pointer to a corresponding + @interface. See objc/objc-act.h for details. */ + tree objc_info; + /* FIXME reuse another field? */ + tree lambda_expr; +}; + +/* We used to have a variant type for lang_type. Keep the name of the + checking accessor for the sole survivor. */ +#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE)) + +/* Nonzero for _CLASSTYPE means that operator delete is defined. */ +#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete) +#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1) +#define TYPE_GETS_VEC_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 2) + +/* Nonzero if `new NODE[x]' should cause the allocation of extra + storage to indicate how many array elements are in use. */ +#define TYPE_VEC_NEW_USES_COOKIE(NODE) \ + (CLASS_TYPE_P (NODE) \ + && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie) + +/* Nonzero means that this _CLASSTYPE node defines ways of converting + itself to other types. */ +#define TYPE_HAS_CONVERSION(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion) + +/* Nonzero means that NODE (a class type) has a default constructor -- + but that it has not yet been declared. */ +#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor) + +/* Nonzero means that NODE (a class type) has a copy constructor -- + but that it has not yet been declared. 
*/ +#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor) + +/* Nonzero means that NODE (a class type) has a move constructor -- + but that it has not yet been declared. */ +#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor) + +/* Nonzero means that NODE (a class type) has an assignment operator + -- but that it has not yet been declared. */ +#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign) + +/* Nonzero means that NODE (a class type) has an assignment operator + -- but that it has not yet been declared. */ +#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign) + +/* Nonzero means that NODE (a class type) has a destructor -- but that + it has not yet been declared. */ +#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor) + +/* Nonzero means that NODE (a class type) is final */ +#define CLASSTYPE_FINAL(NODE) \ + TYPE_FINAL_P (NODE) + + +/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */ +#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign) + +/* True iff the class type NODE has an "operator =" whose parameter + has a parameter of type "const X&". */ +#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign) + +/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */ +#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor) +#define TYPE_HAS_CONST_COPY_CTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor) + +/* Nonzero if this class has an X(initializer_list) constructor. */ +#define TYPE_HAS_LIST_CTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor) + +/* Nonzero if this class has a constexpr constructor other than a copy/move + constructor. 
Note that a class can have constexpr constructors for + static initialization even if it isn't a literal class. */ +#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor) + +/* Nonzero if this class defines an overloaded operator new. (An + operator new [] doesn't count.) */ +#define TYPE_HAS_NEW_OPERATOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_new) + +/* Nonzero if this class defines an overloaded operator new[]. */ +#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new) + +/* Nonzero means that this type is being defined. I.e., the left brace + starting the definition of this type has been seen. */ +#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined) + +/* Nonzero means that this type is either complete or being defined, so we + can do lookup in it. */ +#define COMPLETE_OR_OPEN_TYPE_P(NODE) \ + (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE))) + +/* Mark bits for repeated base checks. */ +#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE)) + +/* Nonzero if the class NODE has multiple paths to the same (virtual) + base object. */ +#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \ + (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped) + +/* Nonzero if the class NODE has multiple instances of the same base + type. */ +#define CLASSTYPE_REPEATED_BASE_P(NODE) \ + (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base) + +/* The member function with which the vtable will be emitted: + the first noninline non-pure-virtual member function. NULL_TREE + if there is no key function or if this is a class template */ +#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method) + +/* Vector of members. During definition, it is unordered and only + member functions are present. After completion it is sorted and + contains both member functions and non-functions. STAT_HACK is + involved to preserve oneslot per name invariant. 
*/ +#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members) + +/* For class templates, this is a TREE_LIST of all member data, + functions, types, and friends in the order of declaration. + The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend, + and the RECORD_TYPE for the class template otherwise. */ +#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list) + +/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These + are the constructors that take an in-charge parameter. */ +#define CLASSTYPE_CONSTRUCTORS(NODE) \ + (get_class_binding_direct (NODE, ctor_identifier)) + +/* A FUNCTION_DECL for the destructor for NODE. This is the + destructors that take an in-charge parameter. If + CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL + until the destructor is created with lazily_declare_fn. */ +#define CLASSTYPE_DESTRUCTOR(NODE) \ + (get_class_binding_direct (NODE, dtor_identifier)) + +/* Nonzero if NODE has a primary base class, i.e., a base class with + which it shares the virtual function table pointer. */ +#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \ + (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE) + +/* If non-NULL, this is the binfo for the primary base class, i.e., + the base class which contains the virtual function table pointer + for this class. */ +#define CLASSTYPE_PRIMARY_BINFO(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->primary_base) + +/* A vector of BINFOs for the direct and indirect virtual base classes + that this type uses in a post-order depth-first left-to-right + order. (In other words, these bases appear in the order that they + should be initialized.) */ +#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases) + +/* The type corresponding to NODE when NODE is used as a base class, + i.e., NODE without virtual base classes or tail padding. 
*/ +#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base) + +/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */ +#define IS_FAKE_BASE_TYPE(NODE) \ + (TREE_CODE (NODE) == RECORD_TYPE \ + && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \ + && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE)) + +/* These are the size and alignment of the type without its virtual + base classes, for when we use this type as a base itself. */ +#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE)) +#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE)) +#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE)) +#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE)) + +/* The alignment of NODE, without its virtual bases, in bytes. */ +#define CLASSTYPE_ALIGN_UNIT(NODE) \ + (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT) + +/* A vec of virtual functions which cannot be inherited by + derived classes. When deriving from this type, the derived + class must provide its own definition for each of these functions. */ +#define CLASSTYPE_PURE_VIRTUALS(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals) + +/* Nonzero means that this type is an abstract class type. */ +#define ABSTRACT_CLASS_TYPE_P(NODE) \ + (CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE)) + +/* Nonzero means that this type has an X() constructor. */ +#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor) + +/* Nonzero means that this type contains a mutable member. */ +#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable) +#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE)) + +/* Nonzero means that this class type is not POD for the purpose of layout + (as defined in the ABI). This is different from the language's POD. 
*/ +#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class) + +/* Nonzero means that this class type is a non-standard-layout class. */ +#define CLASSTYPE_NON_STD_LAYOUT(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout) + +/* Nonzero means that this class type does have unique object + representations. */ +#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations) + +/* Nonzero means that this class type has + CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */ +#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set) + +/* Nonzero means that this class contains pod types whose default + initialization is not a zero initialization (namely, pointers to + data members). */ +#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init) + +/* Nonzero if this class is "empty" in the sense of the C++ ABI. */ +#define CLASSTYPE_EMPTY_P(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->empty_p) + +/* Nonzero if this class is "nearly empty", i.e., contains only a + virtual function table pointer. */ +#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p) + +/* Nonzero if this class contains an empty subobject. */ +#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p) + +/* A list of class types of which this type is a friend. The + TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the + case of a template friend. */ +#define CLASSTYPE_FRIEND_CLASSES(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes) + +/* A list of the classes which grant friendship to this class. */ +#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes) + +/* The associated LAMBDA_EXPR that made this class. 
*/ +#define CLASSTYPE_LAMBDA_EXPR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr) +/* The extra mangling scope for this closure type. */ +#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \ + (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE))) + +/* Say whether this node was declared as a "class" or a "struct". */ +#define CLASSTYPE_DECLARED_CLASS(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->declared_class) + +/* Nonzero if this class has const members + which have no specified initialization. */ +#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \ + (TYPE_LANG_SPECIFIC (NODE) \ + ? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0) +#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE)) + +/* Nonzero if this class has ref members + which have no specified initialization. */ +#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \ + (TYPE_LANG_SPECIFIC (NODE) \ + ? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0) +#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE)) + +/* Nonzero if this class is included from a header file which employs + `#pragma interface', and it is not included in its implementation file. */ +#define CLASSTYPE_INTERFACE_ONLY(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->interface_only) + +/* True if we have already determined whether or not vtables, VTTs, + typeinfo, and other similar per-class data should be emitted in + this translation unit. This flag does not indicate whether or not + these items should be emitted; it only indicates that we know one + way or the other. */ +#define CLASSTYPE_INTERFACE_KNOWN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0) +/* The opposite of CLASSTYPE_INTERFACE_KNOWN. 
*/ +#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown) + +#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \ + (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X)) +#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1) +#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0) + +/* Nonzero if a _DECL node requires us to output debug info for this class. */ +#define CLASSTYPE_DEBUG_REQUESTED(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested) + +/* True if we saw errors while instantiating this class. */ +#define CLASSTYPE_ERRONEOUS(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->erroneous) + +/* True if this class is non-layout-POD only because it was not an aggregate + before C++14. If we run out of bits in lang_type, this could be replaced + with a hash_set only filled in when abi_version_crosses (17). */ +#define CLASSTYPE_NON_POD_AGGREGATE(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_aggregate) + +/* Additional macros for inheritance information. */ + +/* Nonzero means that this class is on a path leading to a new vtable. */ +#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE) + +/* Nonzero means B (a BINFO) has its own vtable. Any copies will not + have this flag set. */ +#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B)) + +/* Compare a BINFO_TYPE with another type for equality. For a binfo, + this is functionally equivalent to using same_type_p, but + measurably faster. At least one of the arguments must be a + BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If + BINFO_TYPE(T) ever stops being the main variant of the class the + binfo is for, this macro must change. 
*/ +#define SAME_BINFO_TYPE_P(A, B) ((A) == (B)) + +/* Any subobject that needs a new vtable must have a vptr and must not + be a non-virtual primary base (since it would then use the vtable from a + derived class and never become non-primary.) */ +#define SET_BINFO_NEW_VTABLE_MARKED(B) \ + (BINFO_NEW_VTABLE_MARKED (B) = 1, \ + gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \ + gcc_assert (TYPE_VFIELD (BINFO_TYPE (B)))) + +/* Nonzero if this binfo is for a dependent base - one that should not + be searched. */ +#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE) + +/* Nonzero if this binfo has lost its primary base binfo (because that + is a nearly-empty virtual base that has been taken by some other + base in the complete hierarchy. */ +#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE) + +/* Nonzero if this BINFO is a primary base class. */ +#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE) + +/* A vec of the vcall indices associated with the class + NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual + function. The VALUE is the index into the virtual table where the + vcall offset for that function is stored, when NODE is a virtual + base. */ +#define CLASSTYPE_VCALL_INDICES(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices) + +/* The various vtables for the class NODE. The primary vtable will be + first, followed by the construction vtables and VTT, if any. */ +#define CLASSTYPE_VTABLES(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->vtables) + +/* The std::type_info variable representing this class, or NULL if no + such variable has been created. This field is only set for the + TYPE_MAIN_VARIANT of the class. */ +#define CLASSTYPE_TYPEINFO_VAR(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) + +/* Accessor macros for the BINFO_VIRTUALS list. */ + +/* The number of bytes by which to adjust the `this' pointer when + calling this virtual function. Subtract this value from the this + pointer. 
Always non-NULL, might be constant zero though. */ +#define BV_DELTA(NODE) (TREE_PURPOSE (NODE)) + +/* If non-NULL, the vtable index at which to find the vcall offset + when calling this virtual function. Add the value at that vtable + index to the this pointer. */ +#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE)) + +/* The function to call. */ +#define BV_FN(NODE) (TREE_VALUE (NODE)) + +/* Whether or not this entry is for a lost primary virtual base. */ +#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE)) + +/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that + this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE + will be NULL_TREE to indicate a throw specification of `()', or + no exceptions allowed. For a noexcept specification, TREE_VALUE + is NULL_TREE and TREE_PURPOSE is the constant-expression. For + a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT + (for templates) or an OVERLOAD list of functions (for implicitly + declared functions). */ +#define TYPE_RAISES_EXCEPTIONS(NODE) \ + TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE)) + +/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()' + or noexcept(true). */ +#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE)) + +/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the + case for things declared noexcept(true) and, with -fnothrow-opt, for + throw() functions. */ +#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE) + +/* The binding level associated with the namespace. */ +#define NAMESPACE_LEVEL(NODE) \ + (LANG_DECL_NS_CHECK (NODE)->level) + +/* Discriminator values for lang_decl. */ + +enum lang_decl_selector +{ + lds_min, + lds_fn, + lds_ns, + lds_parm, + lds_decomp +}; + +/* Flags shared by all forms of DECL_LANG_SPECIFIC. + + Some of the flags live here only to make lang_decl_min/fn smaller. Do + not make this struct larger than 32 bits. 
*/ + +struct GTY(()) lang_decl_base { + ENUM_BITFIELD(lang_decl_selector) selector : 3; + ENUM_BITFIELD(languages) language : 1; + unsigned use_template : 2; + unsigned not_really_extern : 1; /* var or fn */ + unsigned initialized_in_class : 1; /* var or fn */ + + unsigned threadprivate_or_deleted_p : 1; /* var or fn */ + /* anticipated_p is no longer used for anticipated_decls (fn, type + or template). It is used as DECL_OMP_PRIVATIZED_MEMBER in + var. */ + unsigned anticipated_p : 1; + unsigned friend_or_tls : 1; /* var, fn, type or template */ + unsigned unknown_bound_p : 1; /* var */ + unsigned odr_used : 1; /* var or fn */ + unsigned concept_p : 1; /* applies to vars and functions */ + unsigned var_declared_inline_p : 1; /* var */ + unsigned dependent_init_p : 1; /* var */ + + /* The following apply to VAR, FUNCTION, TYPE, CONCEPT, & NAMESPACE + decls. */ + // FIXME: Purview and Attachment are not the same thing, due to + // linkage-declarations. The modules code presumes they are the + // same. (For context, linkage-decl semantics was a very late + // change). We need a module_attachment_p flag, and this will allow + // some simplification of how we handle header unit entities. + // Hurrah! + unsigned module_purview_p : 1; /* in module purview (not GMF) */ + unsigned module_import_p : 1; /* from an import */ + unsigned module_entity_p : 1; /* is in the entitity ary & + hash. */ + /* VAR_DECL or FUNCTION_DECL has attached decls. */ + unsigned module_attached_p : 1; + + /* 12 spare bits. */ +}; + +/* True for DECL codes which have template info and access. */ +#define LANG_DECL_HAS_MIN(NODE) \ + (VAR_OR_FUNCTION_DECL_P (NODE) \ + || TREE_CODE (NODE) == FIELD_DECL \ + || TREE_CODE (NODE) == CONST_DECL \ + || TREE_CODE (NODE) == TYPE_DECL \ + || TREE_CODE (NODE) == TEMPLATE_DECL \ + || TREE_CODE (NODE) == USING_DECL \ + || TREE_CODE (NODE) == CONCEPT_DECL) + +/* DECL_LANG_SPECIFIC for the above codes. 
*/ + +struct GTY(()) lang_decl_min { + struct lang_decl_base base; /* 32-bits. */ + + /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is + THUNK_ALIAS. + In a FUNCTION_DECL for which DECL_THUNK_P does not hold, + VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is + DECL_TEMPLATE_INFO. */ + tree template_info; + + /* In a DECL_THUNK_P FUNCTION_DECL, this is THUNK_VIRTUAL_OFFSET. + In a lambda-capture proxy VAR_DECL, this is DECL_CAPTURED_VARIABLE. + In a function-scope TREE_STATIC VAR_DECL or IMPLICIT_TYPEDEF_P TYPE_DECL, + this is DECL_DISCRIMINATOR. + In a DECL_LOCAL_DECL_P decl, this is the namespace decl it aliases. + Otherwise, in a class-scope DECL, this is DECL_ACCESS. */ + tree access; +}; + +/* Additional DECL_LANG_SPECIFIC information for functions. */ + +struct GTY(()) lang_decl_fn { + struct lang_decl_min min; + + /* In a overloaded operator, this is the compressed operator code. */ + unsigned ovl_op_code : 6; + unsigned global_ctor_p : 1; + unsigned global_dtor_p : 1; + + unsigned static_function : 1; + unsigned pure_virtual : 1; + unsigned defaulted_p : 1; + unsigned has_in_charge_parm_p : 1; + unsigned has_vtt_parm_p : 1; + unsigned pending_inline_p : 1; + unsigned nonconverting : 1; + unsigned thunk_p : 1; + + unsigned this_thunk_p : 1; + unsigned omp_declare_reduction_p : 1; + unsigned has_dependent_explicit_spec_p : 1; + unsigned immediate_fn_p : 1; + unsigned maybe_deleted : 1; + unsigned coroutine_p : 1; + unsigned implicit_constexpr : 1; + + unsigned spare : 9; + + /* 32-bits padding on 64-bit host. */ + + /* For a non-thunk function decl, this is a tree list of + friendly classes. For a thunk function decl, it is the + thunked to function decl. */ + tree befriending_classes; + + /* For a virtual FUNCTION_DECL for which + DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both + this pointer and result pointer adjusting thunks are + chained here. This pointer thunks to return pointer thunks + will be chained on the return pointer thunk. 
+ For a DECL_CONSTUCTOR_P FUNCTION_DECL, this is the base from + whence we inherit. Otherwise, it is the class in which a + (namespace-scope) friend is defined (if any). */ + tree context; + + union lang_decl_u5 + { + /* In a non-thunk FUNCTION_DECL, this is DECL_CLONED_FUNCTION. */ + tree GTY ((tag ("0"))) cloned_function; + + /* In a FUNCTION_DECL for which THUNK_P holds this is the + THUNK_FIXED_OFFSET. */ + HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset; + } GTY ((desc ("%1.thunk_p"))) u5; + + union lang_decl_u3 + { + struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info; + tree GTY ((tag ("0"))) saved_auto_return_type; + } GTY ((desc ("%1.pending_inline_p"))) u; + +}; + +/* DECL_LANG_SPECIFIC for namespaces. */ + +struct GTY(()) lang_decl_ns { + struct lang_decl_base base; /* 32 bits. */ + cp_binding_level *level; + + /* Inline children. Needs to be va_gc, because of PCH. */ + vec *inlinees; + + /* Hash table of bound decls. It'd be nice to have this inline, but + as the hash_map has a dtor, we can't then put this struct into a + union (until moving to c++11). */ + hash_table *bindings; +}; + +/* DECL_LANG_SPECIFIC for parameters. */ + +struct GTY(()) lang_decl_parm { + struct lang_decl_base base; /* 32 bits. */ + int level; + int index; +}; + +/* Additional DECL_LANG_SPECIFIC information for structured bindings. */ + +struct GTY(()) lang_decl_decomp { + struct lang_decl_min min; + /* The artificial underlying "e" variable of the structured binding + variable. */ + tree base; +}; + +/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a + union rather than a struct containing a union as its only field, but + tree.h declares it as a struct. */ + +struct GTY(()) lang_decl { + union GTY((desc ("%h.base.selector"))) lang_decl_u { + /* Nothing of only the base type exists. 
*/ + struct lang_decl_base GTY ((default)) base; + struct lang_decl_min GTY((tag ("lds_min"))) min; + struct lang_decl_fn GTY ((tag ("lds_fn"))) fn; + struct lang_decl_ns GTY((tag ("lds_ns"))) ns; + struct lang_decl_parm GTY((tag ("lds_parm"))) parm; + struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp; + } u; +}; + +/* Looks through a template (if present) to find what it declares. */ +#define STRIP_TEMPLATE(NODE) \ + (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE) + +#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) + +#define LANG_DECL_MIN_CHECK(NODE) __extension__ \ +({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ + if (!LANG_DECL_HAS_MIN (NODE)) \ + lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ + <->u.min; }) + +/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function + template, not just on a FUNCTION_DECL. So when looking for things in + lang_decl_fn, look down through a TEMPLATE_DECL into its result. */ +#define LANG_DECL_FN_CHECK(NODE) __extension__ \ +({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \ + if (!DECL_DECLARES_FUNCTION_P (NODE) \ + || lt->u.base.selector != lds_fn) \ + lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ + <->u.fn; }) + +#define LANG_DECL_NS_CHECK(NODE) __extension__ \ +({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ + if (TREE_CODE (NODE) != NAMESPACE_DECL \ + || lt->u.base.selector != lds_ns) \ + lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ + <->u.ns; }) + +#define LANG_DECL_PARM_CHECK(NODE) __extension__ \ +({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ + if (TREE_CODE (NODE) != PARM_DECL \ + || lt->u.base.selector != lds_parm) \ + lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ + <->u.parm; }) + +#define LANG_DECL_DECOMP_CHECK(NODE) __extension__ \ +({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ + if (!VAR_P (NODE) \ + || lt->u.base.selector != lds_decomp) \ + lang_check_failed 
(__FILE__, __LINE__, __FUNCTION__); \ + &lt->u.decomp; })
*/ +#define DECL_BASE_CONSTRUCTOR_P(NODE) \ + (DECL_NAME (NODE) == base_ctor_identifier) + +/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the + specialized in-charge constructor or the specialized not-in-charge + constructor. */ +#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \ + (DECL_NAME (NODE) == ctor_identifier) + +/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */ +#define DECL_COPY_CONSTRUCTOR_P(NODE) \ + (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0) + +/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */ +#define DECL_MOVE_CONSTRUCTOR_P(NODE) \ + (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE)) + +/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL) + is a destructor. */ +#define DECL_DESTRUCTOR_P(NODE) \ + DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE)) + +/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the + specialized in-charge constructor, in-charge deleting constructor, + or the base destructor. */ +#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \ + (DECL_NAME (NODE) == dtor_identifier) + +/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete + object. */ +#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \ + (DECL_NAME (NODE) == complete_dtor_identifier) + +/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base + object. */ +#define DECL_BASE_DESTRUCTOR_P(NODE) \ + (DECL_NAME (NODE) == base_dtor_identifier) + +/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete + object that deletes the object after it has been destroyed. */ +#define DECL_DELETING_DESTRUCTOR_P(NODE) \ + (DECL_NAME (NODE) == deleting_dtor_identifier) + +/* Nonzero if either DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P or + DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P is true of NODE. */ +#define DECL_MAYBE_IN_CHARGE_CDTOR_P(NODE) \ + (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (NODE) \ + || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (NODE)) + +/* Nonzero if NODE (a _DECL) is a cloned constructor or + destructor. 
*/ +#define DECL_CLONED_FUNCTION_P(NODE) \ + (DECL_NAME (NODE) \ + && IDENTIFIER_CDTOR_P (DECL_NAME (NODE)) \ + && !DECL_MAYBE_IN_CHARGE_CDTOR_P (NODE)) + +/* If DECL_CLONED_FUNCTION_P holds, this is the function that was + cloned. */ +#define DECL_CLONED_FUNCTION(NODE) \ + (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE))->u.fn.u5.cloned_function) + +/* Perform an action for each clone of FN, if FN is a function with + clones. This macro should be used like: + + FOR_EACH_CLONE (clone, fn) + { ... } + + */ +#define FOR_EACH_CLONE(CLONE, FN) \ + if (!(TREE_CODE (FN) == FUNCTION_DECL \ + && DECL_MAYBE_IN_CHARGE_CDTOR_P (FN))) \ + ; \ + else \ + for (CLONE = DECL_CHAIN (FN); \ + CLONE && DECL_CLONED_FUNCTION_P (CLONE); \ + CLONE = DECL_CHAIN (CLONE)) + +/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */ +#define DECL_DISCRIMINATOR_P(NODE) \ + (((TREE_CODE (NODE) == VAR_DECL && TREE_STATIC (NODE)) \ + || DECL_IMPLICIT_TYPEDEF_P (NODE)) \ + && DECL_FUNCTION_SCOPE_P (NODE)) + +/* Discriminator for name mangling. */ +#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_MIN_CHECK (NODE)->access) + +/* The index of a user-declared parameter in its function, starting at 1. + All artificial parameters will have index 0. */ +#define DECL_PARM_INDEX(NODE) \ + (LANG_DECL_PARM_CHECK (NODE)->index) + +/* The level of a user-declared parameter in its function, starting at 1. + A parameter of the function will have level 1; a parameter of the first + nested function declarator (i.e. t in void f (void (*p)(T t))) will have + level 2. */ +#define DECL_PARM_LEVEL(NODE) \ + (LANG_DECL_PARM_CHECK (NODE)->level) + +/* Nonzero if the VTT parm has been added to NODE. */ +#define DECL_HAS_VTT_PARM_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p) + +/* Nonzero if NODE is a user-defined conversion operator. */ +#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE)) + +/* The type to which conversion operator FN converts to. 
*/ +#define DECL_CONV_FN_TYPE(FN) \ + TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN))) + +/* Nonzero if NODE, a static data member, was declared in its class as an + array of unknown bound. */ +#define VAR_HAD_UNKNOWN_BOUND(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ + ? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \ + : false) +#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true) + +/* True iff decl NODE is for an overloaded operator. */ +#define DECL_OVERLOADED_OPERATOR_P(NODE) \ + IDENTIFIER_ANY_OP_P (DECL_NAME (NODE)) + +/* Nonzero if NODE is an assignment operator (including += and such). */ +#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \ + IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE)) + +/* NODE is a function_decl for an overloaded operator. Return its + compressed (raw) operator code. Note that this is not a TREE_CODE. */ +#define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->ovl_op_code) + +/* DECL is an overloaded operator. Test whether it is for TREE_CODE + (a literal constant). */ +#define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \ + (DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE) + +/* For FUNCTION_DECLs: nonzero means that this function is a + constructor or a destructor with an extra in-charge parameter to + control whether or not virtual bases are constructed. */ +#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p) + +/* Nonzero if DECL is a declaration of __builtin_constant_p. */ +#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \ + (TREE_CODE (NODE) == FUNCTION_DECL \ + && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \ + && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P) + +/* Nonzero for _DECL means that this decl appears in (or will appear + in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for + detecting circularity in case members are multiply defined. 
In the + case of a VAR_DECL, it means that no definition has been seen, even + if an initializer has been. */ +#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE)) + +/* Nonzero for a VAR_DECL means that the variable's initialization (if + any) has been processed. (In general, DECL_INITIALIZED_P is + !DECL_EXTERNAL, but static data members may be initialized even if + not defined.) */ +#define DECL_INITIALIZED_P(NODE) \ + (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE))) + +/* Nonzero for a VAR_DECL iff an explicit initializer was provided + or a non-trivial constructor is called. */ +#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \ + (TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))) + +/* Nonzero for a VAR_DECL that was initialized with a + constant-expression. */ +#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ + (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) + +/* Nonzero if the DECL was initialized in the class definition itself, + rather than outside the class. This is used for both static member + VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */ +#define DECL_INITIALIZED_IN_CLASS_P(DECL) \ + (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ + ->u.base.initialized_in_class) + +/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr]. + Only available for decls with DECL_LANG_SPECIFIC. */ +#define DECL_ODR_USED(DECL) \ + (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ + ->u.base.odr_used) + +/* Nonzero for FUNCTION_DECL means that this is a friend that is + either not pushed into a namespace/looked up in a class (because it + is a dependent type, in an uninstantiated template), or it has + /only/ been subject to hidden friend injection from one or more + befriending classes. Once another decl matches, the flag is + cleared. There are requirements on its default parms. 
*/ +#define DECL_UNIQUE_FRIEND_P(NODE) \ + (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE)) \ + ->u.base.friend_or_tls) + +/* True of a TEMPLATE_DECL that is a template class friend. Such + decls are not pushed until instantiated (as they may depend on + parameters of the befriending class). DECL_CHAIN is the + befriending class. */ +#define DECL_UNINSTANTIATED_TEMPLATE_FRIEND_P(NODE) \ + (DECL_LANG_FLAG_4 (TEMPLATE_DECL_CHECK (NODE))) + +/* Nonzero if the thread-local variable was declared with __thread as + opposed to thread_local. */ +#define DECL_GNU_TLS_P(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ + && DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls) +#define SET_DECL_GNU_TLS_P(NODE) \ + (retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \ + DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true) + +/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */ +#define DECL_BEFRIENDING_CLASSES(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->befriending_classes) + +/* Nonzero for FUNCTION_DECL means that this decl is a static + member function. */ +#define DECL_STATIC_FUNCTION_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->static_function) + +/* Nonzero for FUNCTION_DECL means that this decl is a non-static + member function. */ +#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \ + (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE) + +/* Nonzero for FUNCTION_DECL means that this decl is a member function + (static or non-static). */ +#define DECL_FUNCTION_MEMBER_P(NODE) \ + (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE)) + +/* Nonzero for FUNCTION_DECL means that this member function + has `this' as const X *const. */ +#define DECL_CONST_MEMFUNC_P(NODE) \ + (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ + && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \ + (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) + +/* Nonzero for FUNCTION_DECL means that this member function + has `this' as volatile X *const. 
*/ +#define DECL_VOLATILE_MEMFUNC_P(NODE) \ + (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ + && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \ + (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) + +/* Nonzero for a DECL means that this member is a non-static member. */ +#define DECL_NONSTATIC_MEMBER_P(NODE) \ + (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ + || TREE_CODE (NODE) == FIELD_DECL) + +/* Nonzero for a FIELD_DECL means that this member object type + is mutable. */ +#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (FIELD_DECL_CHECK (NODE))) + +/* Nonzero for _DECL means that this constructor or conversion function is + non-converting. */ +#define DECL_NONCONVERTING_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->nonconverting) + +/* Nonzero for FUNCTION_DECL means that this member function is a pure + virtual function. */ +#define DECL_PURE_VIRTUAL_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->pure_virtual) + +/* Nonzero for FUNCTION_DECL means that this member function (either + a constructor or a conversion function) has an explicit specifier + with a value-dependent expression. */ +#define DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->has_dependent_explicit_spec_p) + +/* Nonzero for a defaulted FUNCTION_DECL for which we haven't decided yet if + it's deleted; we will decide in synthesize_method. */ +#define DECL_MAYBE_DELETED(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->maybe_deleted) + +/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an + invalid overrider for a function from a base class. Once we have + complained about an invalid overrider we avoid complaining about it + again. */ +#define DECL_INVALID_OVERRIDER_P(NODE) \ + (DECL_LANG_FLAG_4 (NODE)) + +/* True (in a FUNCTION_DECL) if NODE is a function declared with + an override virt-specifier */ +#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE)) + +/* The thunks associated with NODE, a FUNCTION_DECL. */ +#define DECL_THUNKS(NODE) \ + (DECL_VIRTUAL_P (NODE) ? 
LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) + +/* Set DECL_THUNKS. */ +#define SET_DECL_THUNKS(NODE,THUNKS) \ + (LANG_DECL_FN_CHECK (NODE)->context = (THUNKS)) + +/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this + is the constructor it inherits from. */ +#define DECL_INHERITED_CTOR(NODE) \ + (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \ + ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) + +/* And this is the base that constructor comes from. */ +#define DECL_INHERITED_CTOR_BASE(NODE) \ + (DECL_INHERITED_CTOR (NODE) \ + ? DECL_CONTEXT (flag_new_inheriting_ctors \ + ? strip_inheriting_ctors (NODE) \ + : DECL_INHERITED_CTOR (NODE)) \ + : NULL_TREE) + +/* Set the inherited base. */ +#define SET_DECL_INHERITED_CTOR(NODE,INH) \ + (LANG_DECL_FN_CHECK (NODE)->context = (INH)) + +/* Nonzero if NODE is a thunk, rather than an ordinary function. */ +#define DECL_THUNK_P(NODE) \ + (TREE_CODE (NODE) == FUNCTION_DECL \ + && DECL_LANG_SPECIFIC (NODE) \ + && LANG_DECL_FN_CHECK (NODE)->thunk_p) + +/* Set DECL_THUNK_P for node. */ +#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \ + (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \ + LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING)) + +/* Nonzero if NODE is a this pointer adjusting thunk. */ +#define DECL_THIS_THUNK_P(NODE) \ + (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p) + +/* Nonzero if NODE is a result pointer adjusting thunk. */ +#define DECL_RESULT_THUNK_P(NODE) \ + (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p) + +/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */ +#define DECL_NON_THUNK_FUNCTION_P(NODE) \ + (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE)) + +/* Nonzero if NODE is `extern "C"'. */ +#define DECL_EXTERN_C_P(NODE) \ + (DECL_LANGUAGE (NODE) == lang_c) + +/* Nonzero if NODE is an `extern "C"' function. 
*/ +#define DECL_EXTERN_C_FUNCTION_P(NODE) \ + (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE)) + +/* Non-zero if this variable is declared `constinit' specifier. */ +#define DECL_DECLARED_CONSTINIT_P(NODE) \ + (DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))) + +/* True if DECL is declared 'constexpr'. */ +#define DECL_DECLARED_CONSTEXPR_P(DECL) \ + DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL))) + +/* True if FNDECL is an immediate function. */ +#define DECL_IMMEDIATE_FUNCTION_P(NODE) \ + (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (STRIP_TEMPLATE (NODE))) \ + ? LANG_DECL_FN_CHECK (NODE)->immediate_fn_p \ + : false) +#define SET_DECL_IMMEDIATE_FUNCTION_P(NODE) \ + (retrofit_lang_decl (FUNCTION_DECL_CHECK (NODE)), \ + LANG_DECL_FN_CHECK (NODE)->immediate_fn_p = true) + +// True if NODE was declared as 'concept'. The flag implies that the +// declaration is constexpr, that the declaration cannot be specialized or +// refined, and that the result type must be convertible to bool. +#define DECL_DECLARED_CONCEPT_P(NODE) \ + (DECL_LANG_SPECIFIC (NODE)->u.base.concept_p) + +/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a + template function. */ +#define DECL_PRETTY_FUNCTION_P(NODE) \ + (DECL_NAME (NODE) \ + && id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__")) + +/* For a DECL, true if it is __func__ or similar. */ +#define DECL_FNAME_P(NODE) \ + (VAR_P (NODE) && DECL_NAME (NODE) && DECL_ARTIFICIAL (NODE) \ + && DECL_HAS_VALUE_EXPR_P (NODE) \ + && (id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__") \ + || id_equal (DECL_NAME (NODE), "__FUNCTION__") \ + || id_equal (DECL_NAME (NODE), "__func__"))) + +/* Nonzero if the variable was declared to be thread-local. + We need a special C++ version of this test because the middle-end + DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for + templates. 
*/ +#define CP_DECL_THREAD_LOCAL_P(NODE) \ + (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) + +/* The _TYPE context in which this _DECL appears. This field holds the + class where a virtual function instance is actually defined. */ +#define DECL_CLASS_CONTEXT(NODE) \ + (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) + +/* For a non-member friend function, the class (if any) in which this + friend was defined. For example, given: + + struct S { friend void f () { ... } }; + + the DECL_FRIEND_CONTEXT for `f' will be `S'. */ +#define DECL_FRIEND_CONTEXT(NODE) \ + ((DECL_DECLARES_FUNCTION_P (NODE) && !DECL_VIRTUAL_P (NODE) \ + && !DECL_CONSTRUCTOR_P (NODE)) \ + ? LANG_DECL_FN_CHECK (NODE)->context \ + : NULL_TREE) + +/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */ +#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ + (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT)) + +#define CP_DECL_CONTEXT(NODE) \ + (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace) +#define CP_TYPE_CONTEXT(NODE) \ + (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) +#define FROB_CONTEXT(NODE) \ + ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE)) + +/* 1 iff NODE has namespace scope, including the global namespace. */ +#define DECL_NAMESPACE_SCOPE_P(NODE) \ + (!DECL_TEMPLATE_PARM_P (NODE) \ + && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) + +#define TYPE_NAMESPACE_SCOPE_P(NODE) \ + (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) + +#define NAMESPACE_SCOPE_P(NODE) \ + ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \ + || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE))) + +/* 1 iff NODE is a class member. */ +#define DECL_CLASS_SCOPE_P(NODE) \ + (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) + +#define TYPE_CLASS_SCOPE_P(NODE) \ + (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) + +/* 1 iff NODE is function-local. 
*/ +#define DECL_FUNCTION_SCOPE_P(NODE) \ + (DECL_CONTEXT (NODE) \ + && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) + +#define TYPE_FUNCTION_SCOPE_P(NODE) \ + (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) + +/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for + both the primary typeinfo object and the associated NTBS name. */ +#define DECL_TINFO_P(NODE) \ + TREE_LANG_FLAG_4 (TREE_CHECK2 (NODE,VAR_DECL,TYPE_DECL)) + +/* 1 iff VAR_DECL node NODE is virtual table or VTT. We forward to + DECL_VIRTUAL_P from the common code, as that has the semantics we + need. But we want a more descriptive name. */ +#define DECL_VTABLE_OR_VTT_P(NODE) DECL_VIRTUAL_P (VAR_DECL_CHECK (NODE)) + +/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */ +#define FUNCTION_REF_QUALIFIED(NODE) \ + TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE)) + +/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */ +#define FUNCTION_RVALUE_QUALIFIED(NODE) \ + TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE)) + +/* 1 iff NODE is function-local, but for types. */ +#define LOCAL_CLASS_P(NODE) \ + (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) + +/* The nesting depth of namespace, class or function. Makes is_ancestor much + simpler. Only 8 bits available. */ +#define SCOPE_DEPTH(NODE) \ + (NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space) + +/* Whether the namepace is an inline namespace. */ +#define DECL_NAMESPACE_INLINE_P(NODE) \ + TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE)) + +/* In a NAMESPACE_DECL, a vector of inline namespaces. */ +#define DECL_NAMESPACE_INLINEES(NODE) \ + (LANG_DECL_NS_CHECK (NODE)->inlinees) + +/* Pointer to hash_map from IDENTIFIERS to DECLS */ +#define DECL_NAMESPACE_BINDINGS(NODE) \ + (LANG_DECL_NS_CHECK (NODE)->bindings) + +/* In a NAMESPACE_DECL, points to the original namespace if this is + a namespace alias. 
*/ +#define DECL_NAMESPACE_ALIAS(NODE) \ + DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE)) +#define ORIGINAL_NAMESPACE(NODE) \ + (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE)) + +/* Nonzero if NODE is the std namespace. */ +#define DECL_NAMESPACE_STD_P(NODE) \ + ((NODE) == std_node) + +/* In a TREE_LIST in an attribute list, indicates that the attribute + must be applied at instantiation time. */ +#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) + +/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag + was inherited from a template parameter, not explicitly indicated. */ +#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) + +/* In a TREE_LIST for a parameter-declaration-list, indicates that all the + parameters in the list have declarators enclosed in (). */ +#define PARENTHESIZED_LIST_P(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) + +/* Non zero if this is a using decl for a dependent scope. */ +#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE)) + +/* The scope named in a using decl. */ +#define USING_DECL_SCOPE(NODE) DECL_RESULT_FLD (USING_DECL_CHECK (NODE)) + +/* The decls named by a using decl. */ +#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE)) + +/* Non zero if the using decl refers to a dependent type. */ +#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE)) + +/* True if member using decl NODE refers to a non-inherited NODE. */ +#define USING_DECL_UNRELATED_P(NODE) DECL_LANG_FLAG_2 (USING_DECL_CHECK (NODE)) + +/* True iff the CONST_DECL is a class-scope clone from C++20 using enum, + created by handle_using_decl. 
*/ +#define CONST_DECL_USING_P(NODE) \ + (TREE_CODE (NODE) == CONST_DECL \ + && TREE_TYPE (NODE) \ + && TREE_CODE (TREE_TYPE (NODE)) == ENUMERAL_TYPE \ + && DECL_CONTEXT (NODE) != TREE_TYPE (NODE)) + +/* In a FUNCTION_DECL, this is nonzero if this function was defined in + the class definition. We have saved away the text of the function, + but have not yet processed it. */ +#define DECL_PENDING_INLINE_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->pending_inline_p) + +/* If DECL_PENDING_INLINE_P holds, this is the saved text of the + function. */ +#define DECL_PENDING_INLINE_INFO(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info) + +/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */ +#define TYPE_DECL_ALIAS_P(NODE) \ + DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE)) + +/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */ +#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \ + DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE)) + +/* Nonzero for a type which is an alias for another type; i.e, a type + which declaration was written 'using name-of-type = + another-type'. */ +#define TYPE_ALIAS_P(NODE) \ + (TYPE_P (NODE) \ + && TYPE_NAME (NODE) \ + && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \ + && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE))) + +/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL, TEMPLATE_DECL, + or CONCEPT_DECL, the entity is either a template specialization (if + DECL_USE_TEMPLATE is nonzero) or the abstract instance of the + template itself. + + In either case, DECL_TEMPLATE_INFO is a TEMPLATE_INFO, whose + TI_TEMPLATE is the TEMPLATE_DECL of which this entity is a + specialization or abstract instance. The TI_ARGS is the + template arguments used to specialize the template. 
Consider: + + template <typename T> struct S { friend void f(T) {} }; + + In this case, S<int>::f is, from the point of view of the compiler, + an instantiation of a template -- but, from the point of view of + the language, each instantiation of S results in a wholly unrelated + global function f.  In this case, DECL_TEMPLATE_INFO for S<int>::f + will be non-NULL, but DECL_USE_TEMPLATE will be zero.
DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \ + : NULL_TREE) + +/* If NODE is a type alias, this accessor returns the template info + for the alias template (if any). Otherwise behave as + TYPE_TEMPLATE_INFO. */ +#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \ + (typedef_variant_p (NODE) \ + ? TYPE_ALIAS_TEMPLATE_INFO (NODE) \ + : TYPE_TEMPLATE_INFO (NODE)) + +/* Set the template information for a non-alias n ENUMERAL_, RECORD_, + or UNION_TYPE to VAL. ALIAS's are dealt with separately. */ +#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \ + (TREE_CODE (NODE) == ENUMERAL_TYPE \ + || (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \ + ? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \ + : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))) \ + +#define TI_TEMPLATE(NODE) \ + ((struct tree_template_info*)TEMPLATE_INFO_CHECK (NODE))->tmpl +#define TI_ARGS(NODE) \ + ((struct tree_template_info*)TEMPLATE_INFO_CHECK (NODE))->args +#define TI_PENDING_TEMPLATE_FLAG(NODE) \ + TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)) +/* For a given TREE_VEC containing a template argument list, + this property contains the number of arguments that are not + defaulted. */ +#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ + TREE_CHAIN (TREE_VEC_CHECK (NODE)) + +/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT + property. */ +#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \ + NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE) +#if CHECKING_P +#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ + int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) +#else +#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ + NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \ + ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \ + : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE)) +#endif + +/* The list of access checks that were deferred during parsing + which need to be performed at template instantiation time. 
+ + FIXME this should be associated with the TEMPLATE_DECL, not the + TEMPLATE_INFO. */ +#define TI_DEFERRED_ACCESS_CHECKS(NODE) \ + ((struct tree_template_info*)TEMPLATE_INFO_CHECK \ + (NODE))->deferred_access_checks + +/* We use TREE_VECs to hold template arguments. If there is only one + level of template arguments, then the TREE_VEC contains the + arguments directly. If there is more than one level of template + arguments, then each entry in the TREE_VEC is itself a TREE_VEC, + containing the template arguments for a single level. The first + entry in the outer TREE_VEC is the outermost level of template + parameters; the last is the innermost. + + It is incorrect to ever form a template argument vector containing + only one level of arguments, but which is a TREE_VEC containing as + its only entry the TREE_VEC for that level. + + For each TREE_VEC containing the template arguments for a single + level, it's possible to get or set the number of non defaulted + template arguments by using the accessor macros + GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or + SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */ + +/* Nonzero if the template arguments is actually a vector of vectors, + rather than just a vector. */ +#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \ + (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \ + && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC) + +/* The depth of a template argument vector. When called directly by + the parser, we use a TREE_LIST rather than a TREE_VEC to represent + template arguments. In that case, there is only one level of template + arguments. We may even see NULL_TREE if there are 0 levels of + template arguments, as in cp_parser_requires_expression. */ +#define TMPL_ARGS_DEPTH(NODE) \ + ((NODE) == NULL_TREE ? 0 \ + : TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) \ + : 1) + +/* The LEVELth level of the template ARGS. The outermost level of + args is level 1, not level 0. 
*/ +#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \ + (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \ + ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS)) + +/* Set the LEVELth level of the template ARGS to VAL. This macro does + not work with single-level argument vectors. */ +#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \ + (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL)) + +/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */ +#define TMPL_ARG(ARGS, LEVEL, IDX) \ + (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX)) + +/* Given a single level of template arguments in NODE, return the + number of arguments. */ +#define NUM_TMPL_ARGS(NODE) \ + (TREE_VEC_LENGTH (NODE)) + +/* Returns the innermost level of template arguments in ARGS. */ +#define INNERMOST_TEMPLATE_ARGS(NODE) \ + (get_innermost_template_args ((NODE), 1)) + +/* The number of levels of template parameters given by NODE. */ +#define TMPL_PARMS_DEPTH(NODE) \ + ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) + +/* The TEMPLATE_DECL instantiated or specialized by NODE. This + TEMPLATE_DECL will be the immediate parent, not the most general + template. For example, in: + + template struct S { template void f(U); } + + the FUNCTION_DECL for S::f will have, as its + DECL_TI_TEMPLATE, `template S::f'. + + As a special case, for a member friend template of a template + class, this value will not be a TEMPLATE_DECL, but rather an + IDENTIFIER_NODE or OVERLOAD indicating the name of the template and + any explicit template arguments provided. For example, in: + + template struct S { friend void f(int, double); } + + the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the + DECL_TI_ARGS will be {int}. + + For a FIELD_DECL with a non-static data member initializer, this value + is the FIELD_DECL it was instantiated from. */ +#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE)) + +/* The template arguments used to obtain this decl from the most + general form of DECL_TI_TEMPLATE. 
For the example given for + DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These + are always the full set of arguments required to instantiate this + declaration from the most general template specialized here. */ +#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE)) + +/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE + will be generated from a partial specialization, the TEMPLATE_DECL + referred to here will be the original template. For example, + given: + + template struct S {}; + template struct S {}; + + the CLASSTYPE_TI_TEMPLATE for S will be S, not the S. + + For a member class template, CLASSTYPE_TI_TEMPLATE always refers to the + partial instantiation rather than the primary template. CLASSTYPE_TI_ARGS + are for the primary template if the partial instantiation isn't + specialized, or for the explicit specialization if it is, e.g. + + template class C { template class D; } + template <> template class C::D; */ +#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE)) +#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE)) + +/* For a template instantiation TYPE, returns the TYPE corresponding + to the primary template. Otherwise returns TYPE itself. */ +#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \ + ((CLASSTYPE_USE_TEMPLATE ((TYPE)) \ + && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \ + ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \ + (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \ + : (TYPE)) + +/* Like CLASS_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */ +#define TYPE_TI_TEMPLATE(NODE) \ + (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE))) + +/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */ +#define TYPE_TI_ARGS(NODE) \ + (TI_ARGS (TYPE_TEMPLATE_INFO (NODE))) + +#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE) + +/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the + sense of [temp.mem]. 
*/ +#define DECL_MEMBER_TEMPLATE_P(NODE) \ + (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE))) + +/* Nonzero if the NODE corresponds to the template parameters for a + member template, whose inline definition is being processed after + the class definition is complete. */ +#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE) + +/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */ +#define DECL_PACK_P(NODE) \ + (DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE))) + +/* Determines if NODE is an expansion of one or more parameter packs, + e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ +#define PACK_EXPANSION_P(NODE) \ + (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ + || TREE_CODE (NODE) == EXPR_PACK_EXPANSION) + +/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or + EXPR_PACK_EXPANSION. */ +#define PACK_EXPANSION_PATTERN(NODE) \ + (TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \ + : TREE_OPERAND (NODE, 0)) + +/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or + EXPR_PACK_EXPANSION. */ +#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \ + if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \ + TREE_TYPE (NODE) = VALUE; \ + else \ + TREE_OPERAND (NODE, 0) = VALUE + +/* The list of parameter packs used in the PACK_EXPANSION_* node. The + TREE_VALUE of each TREE_LIST contains the parameter packs. */ +#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \ + *(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \ + ? &TREE_OPERAND (NODE, 1) \ + : &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE))) + +/* Any additional template args to be applied when substituting into + the pattern, set by tsubst_pack_expansion for partial instantiations. + If this is a TREE_LIST, the TREE_VALUE of the first element is the + usual template argument TREE_VEC, and the TREE_PURPOSE of later elements + are enclosing functions that provided function parameter packs we'll need + to map appropriately. 
*/ +#define PACK_EXPANSION_EXTRA_ARGS(NODE) \ + *(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ + ? &TYPE_MAX_VALUE_RAW (NODE) \ + : &TREE_OPERAND ((NODE), 2)) + +/* True iff this pack expansion is within a function context. */ +#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE) + +/* True iff this pack expansion is for sizeof.... */ +#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE) + +/* True iff this pack expansion is for auto... in lambda init-capture. */ +#define PACK_EXPANSION_AUTO_P(NODE) TREE_LANG_FLAG_2 (NODE) + +/* True if we must use PACK_EXPANSION_EXTRA_ARGS and avoid partial + instantiation of this pack expansion. */ +#define PACK_EXPANSION_FORCE_EXTRA_ARGS_P(NODE) TREE_LANG_FLAG_3 (NODE) + +/* True iff the wildcard can match a template parameter pack. */ +#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE) + +/* Determine if this is an argument pack. */ +#define ARGUMENT_PACK_P(NODE) \ + (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \ + || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK) + +/* The arguments stored in an argument pack. Arguments are stored in a + TREE_VEC, which may have length zero. */ +#define ARGUMENT_PACK_ARGS(NODE) \ + (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \ + : TREE_OPERAND (NODE, 0)) + +/* Set the arguments stored in an argument pack. VALUE must be a + TREE_VEC. */ +#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \ + if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \ + TREE_TYPE (NODE) = VALUE; \ + else \ + TREE_OPERAND (NODE, 0) = VALUE + +/* Whether the argument pack is "incomplete", meaning that more + arguments can still be deduced. Incomplete argument packs are only + used when the user has provided an explicit template argument list + for a variadic function template. Some of the explicit template + arguments will be placed into the beginning of the argument pack, + but additional arguments might still be deduced. 
*/ +#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \ + TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE)) + +/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template + arguments used to fill this pack. */ +#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \ + TREE_TYPE (ARGUMENT_PACK_ARGS (NODE)) + +/* In an ARGUMENT_PACK_SELECT, the argument pack from which an + argument will be selected. */ +#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \ + (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack) + +/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to + select. */ +#define ARGUMENT_PACK_SELECT_INDEX(NODE) \ + (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index) + +#define FOLD_EXPR_CHECK(NODE) \ + TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \ + BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR) + +#define BINARY_FOLD_EXPR_CHECK(NODE) \ + TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR) + +/* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */ +#define FOLD_EXPR_P(NODE) \ + (TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \ + || TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \ + || TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \ + || TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR) + +/* True when NODE is a fold over a compound assignment operator. */ +#define FOLD_EXPR_MODIFY_P(NODE) \ + TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE)) + +/* An INTEGER_CST containing the tree code of the folded operator. */ +#define FOLD_EXPR_OP_RAW(NODE) \ + TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0) + +/* The tree code of the folded operator. */ +#define FOLD_EXPR_OP(NODE) \ + ((enum tree_code) TREE_INT_CST_LOW (FOLD_EXPR_OP_RAW (NODE))) + +/* The expression containing an unexpanded parameter pack. */ +#define FOLD_EXPR_PACK(NODE) \ + TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1) + +/* In a binary fold expression, the argument with no unexpanded + parameter packs. 
*/ +#define FOLD_EXPR_INIT(NODE) \ + TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2) + +/* In a FUNCTION_DECL, the saved auto-return pattern. */ +#define DECL_SAVED_AUTO_RETURN_TYPE(NODE) \ + (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \ + ->u.saved_auto_return_type) + +/* True if NODE is an implicit INDIRECT_REF from convert_from_reference. */ +#define REFERENCE_REF_P(NODE) \ + (INDIRECT_REF_P (NODE) \ + && TREE_TYPE (TREE_OPERAND (NODE, 0)) \ + && TYPE_REF_P (TREE_TYPE (TREE_OPERAND ((NODE), 0)))) + +/* True iff this represents an lvalue being treated as an rvalue during return + or throw as per [class.copy.elision]. */ +#define IMPLICIT_RVALUE_P(NODE) \ + TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE), NON_LVALUE_EXPR, STATIC_CAST_EXPR)) + +#define NEW_EXPR_USE_GLOBAL(NODE) \ + TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE)) +#define DELETE_EXPR_USE_GLOBAL(NODE) \ + TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE)) +#define DELETE_EXPR_USE_VEC(NODE) \ + TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE)) + +#define CALL_OR_AGGR_INIT_CHECK(NODE) \ + TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR) + +/* In a CALL_EXPR appearing in a template, true if Koenig lookup + should be performed at instantiation time. */ +#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE)) + +/* True if the arguments to NODE should be evaluated in left-to-right + order regardless of PUSH_ARGS_REVERSED. */ +#define CALL_EXPR_ORDERED_ARGS(NODE) \ + TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE)) + +/* True if the arguments to NODE should be evaluated in right-to-left + order regardless of PUSH_ARGS_REVERSED. */ +#define CALL_EXPR_REVERSE_ARGS(NODE) \ + TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE)) + +/* True if CALL_EXPR was written as an operator expression, not a function + call. 
*/ +#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \ + TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE)) + +/* A TREE_LIST containing the result of phase 1 name lookup of the operator + overloads that are pertinent to the dependent operator expression whose + type is NODE. Each TREE_PURPOSE is an IDENTIFIER_NODE and TREE_VALUE is + the corresponding (possibly empty) lookup result. The TREE_TYPE of the + first TREE_LIST node points back to NODE. */ +#define DEPENDENT_OPERATOR_TYPE_SAVED_LOOKUPS(NODE) \ + TYPE_VALUES_RAW (DEPENDENT_OPERATOR_TYPE_CHECK (NODE)) + +/* Guarded helper for the above accessor macro that takes a (templated) + operator expression instead of the type thereof. */ +inline tree +templated_operator_saved_lookups (tree t) +{ + tree type = TREE_TYPE (EXPR_CHECK (t)); + if (type && TREE_CODE (type) == DEPENDENT_OPERATOR_TYPE) + return DEPENDENT_OPERATOR_TYPE_SAVED_LOOKUPS (type); + else + return NULL_TREE; +} + +/* Indicates whether a string literal has been parenthesized. Such + usages are disallowed in certain circumstances. */ + +#define PAREN_STRING_LITERAL_P(NODE) \ + TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE)) + +/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, an + INDIRECT_REF comes from parenthesizing a _DECL, or a PAREN_EXPR identifies a + parenthesized initializer relevant for decltype(auto). Currently only set + some of the time in C++14 mode. */ + +#define REF_PARENTHESIZED_P(NODE) \ + TREE_LANG_FLAG_2 (TREE_CHECK5 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF, VIEW_CONVERT_EXPR, PAREN_EXPR)) + +/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a + constructor call, rather than an ordinary function call. */ +#define AGGR_INIT_VIA_CTOR_P(NODE) \ + TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE)) + +/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize + the object. 
*/ +#define AGGR_INIT_ZERO_FIRST(NODE) \ + TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE)) + +/* Nonzero means that the call is the jump from a thunk to the + thunked-to function. */ +#define AGGR_INIT_FROM_THUNK_P(NODE) \ + (AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag) + +/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR + accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of + CALL_EXPR_STATIC_CHAIN). */ + +#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1) +#define AGGR_INIT_EXPR_SLOT(NODE) \ + TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2) +#define AGGR_INIT_EXPR_ARG(NODE, I) \ + TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3) +#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3) + +/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE. + We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if + the argument count is zero when checking is enabled. Instead, do + the pointer arithmetic to advance past the 3 fixed operands in a + AGGR_INIT_EXPR. That produces a valid pointer to just past the end of + the operand array, even if it's not valid to dereference it. */ +#define AGGR_INIT_EXPR_ARGP(NODE) \ + (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3) + +/* Abstract iterators for AGGR_INIT_EXPRs. */ + +/* Structure containing iterator state. */ +struct aggr_init_expr_arg_iterator { + tree t; /* the aggr_init_expr */ + int n; /* argument count */ + int i; /* next argument index */ +}; + +/* Initialize the abstract argument list iterator object ITER with the + arguments from AGGR_INIT_EXPR node EXP. */ +inline void +init_aggr_init_expr_arg_iterator (tree exp, + aggr_init_expr_arg_iterator *iter) +{ + iter->t = exp; + iter->n = aggr_init_expr_nargs (exp); + iter->i = 0; +} + +/* Return the next argument from abstract argument list iterator object ITER, + and advance its state. Return NULL_TREE if there are no more arguments. 
*/ +inline tree +next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter) +{ + tree result; + if (iter->i >= iter->n) + return NULL_TREE; + result = AGGR_INIT_EXPR_ARG (iter->t, iter->i); + iter->i++; + return result; +} + +/* Initialize the abstract argument list iterator object ITER, then advance + past and return the first argument. Useful in for expressions, e.g. + for (arg = first_aggr_init_expr_arg (exp, &iter); arg; + arg = next_aggr_init_expr_arg (&iter)) */ +inline tree +first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter) +{ + init_aggr_init_expr_arg_iterator (exp, iter); + return next_aggr_init_expr_arg (iter); +} + +/* Test whether there are more arguments in abstract argument list iterator + ITER, without changing its state. */ +inline bool +more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) +{ + return (iter->i < iter->n); +} + +/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable + ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */ +#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \ + for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \ + (arg) = next_aggr_init_expr_arg (&(iter))) + +/* VEC_INIT_EXPR accessors. */ +#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0) +#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1) + +/* Indicates that a VEC_INIT_EXPR is a potential constant expression. + Only set when the current function is constexpr. */ +#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \ + TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE)) + +/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */ +#define VEC_INIT_EXPR_VALUE_INIT(NODE) \ + TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE)) + +/* If T is a VEC_INIT_EXPR, return it, possibly stripping a TARGET_EXPR + wrapper. Otherwise, return null. 
*/ +inline tree +get_vec_init_expr (tree t) +{ + if (t && TREE_CODE (t) == TARGET_EXPR) + t = TARGET_EXPR_INITIAL (t); + if (t && TREE_CODE (t) == VEC_INIT_EXPR) + return t; + return NULL_TREE; +} + +/* The condition under which this MUST_NOT_THROW_EXPR actually blocks + exceptions. NULL_TREE means 'true'. */ +#define MUST_NOT_THROW_COND(NODE) \ + TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1) + +/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a + TEMPLATE_DECL. This macro determines whether or not a given class + type is really a template type, as opposed to an instantiation or + specialization of one. */ +#define CLASSTYPE_IS_TEMPLATE(NODE) \ + (CLASSTYPE_TEMPLATE_INFO (NODE) \ + && !CLASSTYPE_USE_TEMPLATE (NODE) \ + && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) + +/* The name used by the user to name the typename type. Typically, + this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the + corresponding TYPE_DECL. However, this may also be a + TEMPLATE_ID_EXPR if we had something like `typename X::Y'. */ +#define TYPENAME_TYPE_FULLNAME(NODE) \ + (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE))) + +/* True if a TYPENAME_TYPE was declared as an "enum". */ +#define TYPENAME_IS_ENUM_P(NODE) \ + (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE))) + +/* True if a TYPENAME_TYPE was declared as a "class", "struct", or + "union". */ +#define TYPENAME_IS_CLASS_P(NODE) \ + (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE))) + +/* True if a TYPENAME_TYPE is in the process of being resolved. */ +#define TYPENAME_IS_RESOLVING_P(NODE) \ + (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE))) + +/* [class.virtual] + + A class that declares or inherits a virtual function is called a + polymorphic class. */ +#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) + +/* Nonzero if this class has a virtual function table pointer. 
*/ +#define TYPE_CONTAINS_VPTR_P(NODE) \ + (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) + +/* Nonzero if NODE is a FUNCTION_DECL or VARIABLE_DECL (for a decl + with namespace scope) declared in a local scope. */ +#define DECL_LOCAL_DECL_P(NODE) \ + DECL_LANG_FLAG_0 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) + +/* The namespace-scope decl a DECL_LOCAL_DECL_P aliases. */ +#define DECL_LOCAL_DECL_ALIAS(NODE) \ + DECL_ACCESS ((gcc_checking_assert (DECL_LOCAL_DECL_P (NODE)), NODE)) + +/* Nonzero if NODE is the target for genericization of 'return' stmts + in constructors/destructors of targetm.cxx.cdtor_returns_this targets. */ +#define LABEL_DECL_CDTOR(NODE) \ + DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE)) + +/* True if NODE was declared with auto in its return type, but it has + started compilation and so the return type might have been changed by + return type deduction; its declared return type should be found in + DECL_SAVED_AUTO_RETURN_TYPE (NODE). */ +#define FNDECL_USED_AUTO(NODE) \ + TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE)) + +/* True if NODE is needed for a manifestly constant-evaluated expression. + This doesn't especially need to be a flag, since currently it's only + used for error recovery; if we run out of function flags it could move + to an attribute. */ +#define FNDECL_MANIFESTLY_CONST_EVALUATED(NODE) \ + TREE_LANG_FLAG_4 (FUNCTION_DECL_CHECK (NODE)) + +/* True for artificial decls added for OpenMP privatized non-static + data members. */ +#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p) + +/* Nonzero if NODE is an artificial FUNCTION_DECL for + #pragma omp declare reduction. */ +#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \ + (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p) + +/* Nonzero if DECL has been declared threadprivate by + #pragma omp threadprivate. 
*/ +#define CP_DECL_THREADPRIVATE_P(DECL) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) + +/* Nonzero if NODE is a VAR_DECL which has been declared inline. */ +#define DECL_VAR_DECLARED_INLINE_P(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ + ? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \ + : false) +#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \ + = true) + +/* True if NODE is a constant variable with a value-dependent initializer. */ +#define DECL_DEPENDENT_INIT_P(NODE) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ + && DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p) +#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \ + (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X)) + +/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding + declaration or one of VAR_DECLs for the user identifiers in it. */ +#define DECL_DECOMPOSITION_P(NODE) \ + (VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \ + ? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \ + : false) + +/* The underlying artificial VAR_DECL for structured binding. */ +#define DECL_DECOMP_BASE(NODE) \ + (LANG_DECL_DECOMP_CHECK (NODE)->base) + +/* Nonzero if NODE is an inline VAR_DECL. In C++17, static data members + declared with constexpr specifier are implicitly inline variables. */ +#define DECL_INLINE_VAR_P(NODE) \ + (DECL_VAR_DECLARED_INLINE_P (NODE) \ + || (cxx_dialect >= cxx17 \ + && DECL_DECLARED_CONSTEXPR_P (NODE) \ + && DECL_CLASS_SCOPE_P (NODE))) + +/* Nonzero if DECL was declared with '= delete'. */ +#define DECL_DELETED_FN(DECL) \ + (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p) + +/* Nonzero if DECL was declared with '= default' (maybe implicitly). */ +#define DECL_DEFAULTED_FN(DECL) \ + (LANG_DECL_FN_CHECK (DECL)->defaulted_p) + +/* Nonzero if DECL is explicitly defaulted in the class body. 
*/ +#define DECL_DEFAULTED_IN_CLASS_P(DECL) \ + (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL)) +/* Nonzero if DECL was defaulted outside the class body. */ +#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \ + (DECL_DEFAULTED_FN (DECL) \ + && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL))) + +/* Record whether a typedef for type `int' was actually `signed int'. */ +#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) + +/* Returns nonzero if DECL has external linkage, as specified by the + language standard. (This predicate may hold even when the + corresponding entity is not actually given external linkage in the + object file; see decl_linkage for details.) */ +#define DECL_EXTERNAL_LINKAGE_P(DECL) \ + (decl_linkage (DECL) == lk_external) + +/* Keep these codes in ascending code order. */ + +#define INTEGRAL_CODE_P(CODE) \ + ((CODE) == ENUMERAL_TYPE \ + || (CODE) == BOOLEAN_TYPE \ + || (CODE) == INTEGER_TYPE) + +/* [basic.fundamental] + + Types bool, char, wchar_t, and the signed and unsigned integer types + are collectively called integral types. + + Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration + types as well, which is incorrect in C++. Keep these checks in + ascending code order. */ +#define CP_INTEGRAL_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == BOOLEAN_TYPE \ + || TREE_CODE (TYPE) == INTEGER_TYPE) + +/* Returns true if TYPE is an integral or enumeration name. Keep + these checks in ascending code order. */ +#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ + (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE)) + +/* Returns true if TYPE is an integral or unscoped enumeration type. */ +#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \ + (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE)) + +/* True if the class type TYPE is a literal type. 
*/ +#define CLASSTYPE_LITERAL_P(TYPE) \ + (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal) + +/* [basic.fundamental] + + Integral and floating types are collectively called arithmetic + types. + + As a GNU extension, we also accept complex types. + + Keep these checks in ascending code order. */ +#define ARITHMETIC_TYPE_P(TYPE) \ + (CP_INTEGRAL_TYPE_P (TYPE) \ + || TREE_CODE (TYPE) == REAL_TYPE \ + || TREE_CODE (TYPE) == COMPLEX_TYPE) + +/* True iff TYPE is cv decltype(nullptr). */ +#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE) + +/* [basic.types] + + Arithmetic types, enumeration types, pointer types, + pointer-to-member types, and std::nullptr_t are collectively called + scalar types. + + Keep these checks in ascending code order. */ +#define SCALAR_TYPE_P(TYPE) \ + (TYPE_PTRDATAMEM_P (TYPE) \ + || TREE_CODE (TYPE) == ENUMERAL_TYPE \ + || ARITHMETIC_TYPE_P (TYPE) \ + || TYPE_PTR_P (TYPE) \ + || TYPE_PTRMEMFUNC_P (TYPE) \ + || NULLPTR_TYPE_P (TYPE)) + +/* Determines whether this type is a C++0x scoped enumeration + type. Scoped enumerations types are introduced via "enum class" or + "enum struct", e.g., + + enum class Color { + Red, Green, Blue + }; + + Scoped enumeration types are different from normal (unscoped) + enumeration types in several ways: + + - The enumerators of a scoped enumeration type are only available + within the scope of the enumeration type and not in the + enclosing scope. For example, the Red color can be referred to + with "Color::Red" but not "Red". + + - Scoped enumerators and enumerations do not implicitly convert + to integers or 'bool'. + + - The underlying type of the enum is well-defined. */ +#define SCOPED_ENUM_P(TYPE) \ + (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE)) + +/* Determine whether this is an unscoped enumeration type. 
*/ +#define UNSCOPED_ENUM_P(TYPE) \ + (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE)) + +/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped + enumeration type (1) or a normal (unscoped) enumeration type + (0). */ +#define SET_SCOPED_ENUM_P(TYPE, VAL) \ + (ENUM_IS_SCOPED (TYPE) = (VAL)) + +#define SET_OPAQUE_ENUM_P(TYPE, VAL) \ + (ENUM_IS_OPAQUE (TYPE) = (VAL)) + +#define OPAQUE_ENUM_P(TYPE) \ + (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE)) + +/* Determines whether an ENUMERAL_TYPE has an explicit + underlying type. */ +#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE)) + +/* Returns the underlying type of the given enumeration type. The + underlying type is determined in different ways, depending on the + properties of the enum: + + - In C++0x, the underlying type can be explicitly specified, e.g., + + enum E1 : char { ... } // underlying type is char + + - In a C++0x scoped enumeration, the underlying type is int + unless otherwises specified: + + enum class E2 { ... } // underlying type is int + + - Otherwise, the underlying type is determined based on the + values of the enumerators. In this case, the + ENUM_UNDERLYING_TYPE will not be set until after the definition + of the enumeration is completed by finish_enum. */ +#define ENUM_UNDERLYING_TYPE(TYPE) \ + TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE)) + +/* [dcl.init.aggr] + + An aggregate is an array or a class with no user-provided + constructors, no brace-or-equal-initializers for non-static data + members, no private or protected non-static data members, no + base classes, and no virtual functions. + + As an extension, we also treat vectors as aggregates. Keep these + checks in ascending code order. 
*/ +#define CP_AGGREGATE_TYPE_P(TYPE) \ + (gnu_vector_type_p (TYPE) \ + || TREE_CODE (TYPE) == ARRAY_TYPE \ + || (CLASS_TYPE_P (TYPE) && COMPLETE_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) + +/* Nonzero for a class type means that the class type has a + user-declared constructor. */ +#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) + +/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a + late-specified return type. */ +#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \ + (TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE))) + +/* When appearing in an INDIRECT_REF, it means that the tree structure + underneath is actually a call to a constructor. This is needed + when the constructor must initialize local storage (which can + be automatically destroyed), rather than allowing it to allocate + space from the heap. + + When appearing in a SAVE_EXPR, it means that underneath + is a call to a constructor. + + When appearing in a CONSTRUCTOR, the expression is an unconverted + compound literal. + + When appearing in a FIELD_DECL, it means that this field + has been duly initialized in its constructor. */ +#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) + +/* True if NODE is a brace-enclosed initializer. */ +#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \ + (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node) + +/* True if NODE is a compound-literal, i.e., a brace-enclosed + initializer cast to a particular type. This is mostly only set during + template parsing; once the initializer has been digested into an actual + value of the type, the expression is represented by a TARGET_EXPR. */ +#define COMPOUND_LITERAL_P(NODE) \ + (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) + +#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \ + && vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\ + && !TREE_HAS_CONSTRUCTOR (NODE)) + +/* True if NODE is a init-list used as a direct-initializer, i.e. 
+ B b{1,2}, not B b({1,2}) or B b = {1,2}. */ +#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE))) + +/* True if this CONSTRUCTOR is instantiation-dependent and needs to be + substituted. */ +#define CONSTRUCTOR_IS_DEPENDENT(NODE) \ + (TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE))) + +/* True if this CONSTRUCTOR should not be used as a variable initializer + because it was loaded from a constexpr variable with mutable fields. */ +#define CONSTRUCTOR_MUTABLE_POISON(NODE) \ + (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE))) + +/* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather + than C++11 functional cast syntax. */ +#define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \ + (TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE))) + +/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the + CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with + CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */ +#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \ + (TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE))) + +#define DIRECT_LIST_INIT_P(NODE) \ + (BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE)) + +/* True if this is a designated initializer (when we allow initializer-clauses + mixed with designated-initializer-clauses set whenever there is at least + one designated-initializer-clause), or a C99 designator. */ +#define CONSTRUCTOR_IS_DESIGNATED_INIT(NODE) \ + (TREE_LANG_FLAG_6 (CONSTRUCTOR_CHECK (NODE))) + +/* True if this CONSTRUCTOR comes from a parenthesized list of values, e.g. + A(1, 2, 3). */ +#define CONSTRUCTOR_IS_PAREN_INIT(NODE) \ + (CONSTRUCTOR_CHECK(NODE)->base.private_flag) + +/* True if reshape_init built this sub-CONSTRUCTOR to undo the brace elision + of the original CONSTRUCTOR. This flag is used during C++20 aggregate + CTAD. 
*/ +#define CONSTRUCTOR_BRACES_ELIDED_P(NODE) \ + (CONSTRUCTOR_CHECK (NODE)->base.protected_flag) + +/* True if NODE represents a conversion for direct-initialization in a + template. Set by perform_implicit_conversion_flags. */ +#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \ + (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE))) + +/* True if NODE represents a dependent conversion of a non-type template + argument. Set by maybe_convert_nontype_argument. */ +#define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \ + (TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE))) + +/* True if NODE represents a conversion for braced-init-list in a + template. Set by perform_implicit_conversion_flags. */ +#define IMPLICIT_CONV_EXPR_BRACED_INIT(NODE) \ + (TREE_LANG_FLAG_2 (IMPLICIT_CONV_EXPR_CHECK (NODE))) + +/* Nonzero means that an object of this type cannot be initialized using + an initializer list. */ +#define CLASSTYPE_NON_AGGREGATE(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate) +#define TYPE_NON_AGGREGATE_CLASS(NODE) \ + (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE)) + +/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */ +#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign) + +/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */ +#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor) + +/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */ +#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign) + +/* Nonzero if there is a non-trivial X::X(X&&) for this class. */ +#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor) + +/* Nonzero if there is no trivial default constructor for this class. */ +#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt) + +/* Nonzero if TYPE has a trivial destructor. 
From [class.dtor]: + + A destructor is trivial if it is an implicitly declared + destructor and if: + + - all of the direct base classes of its class have trivial + destructors, + + - for all of the non-static data members of its class that are + of class type (or array thereof), each such class has a + trivial destructor. */ +#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \ + (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE)) + +/* Nonzero for _TYPE node means that this type does not have a trivial + destructor. Therefore, destroying an object of this type will + involve a call to a destructor. This can apply to objects of + ARRAY_TYPE if the type of the elements needs a destructor. */ +#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \ + (TYPE_LANG_FLAG_4 (NODE)) + +/* Nonzero for class type means that the default constructor is trivial. */ +#define TYPE_HAS_TRIVIAL_DFLT(NODE) \ + (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE)) + +/* Nonzero for class type means that copy initialization of this type can use + a bitwise copy. */ +#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \ + (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE)) + +/* Nonzero for class type means that assignment of this type can use + a bitwise copy. */ +#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \ + (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE)) + +/* Returns true if NODE is a pointer-to-data-member. */ +#define TYPE_PTRDATAMEM_P(NODE) \ + (TREE_CODE (NODE) == OFFSET_TYPE) + +/* Returns true if NODE is a pointer. */ +#define TYPE_PTR_P(NODE) \ + (TREE_CODE (NODE) == POINTER_TYPE) + +/* Returns true if NODE is a reference. */ +#define TYPE_REF_P(NODE) \ + (TREE_CODE (NODE) == REFERENCE_TYPE) + +/* Returns true if NODE is a pointer or a reference. 
*/ +#define INDIRECT_TYPE_P(NODE) \ + (TYPE_PTR_P (NODE) || TYPE_REF_P (NODE)) + +/* Returns true if NODE is an object type: + + [basic.types] + + An object type is a (possibly cv-qualified) type that is not a + function type, not a reference type, and not a void type. + + Keep these checks in ascending order, for speed. */ +#define TYPE_OBJ_P(NODE) \ + (!TYPE_REF_P (NODE) \ + && !VOID_TYPE_P (NODE) \ + && !FUNC_OR_METHOD_TYPE_P (NODE)) + +/* Returns true if NODE is a pointer to an object. Keep these checks + in ascending tree code order. */ +#define TYPE_PTROB_P(NODE) \ + (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) + +/* Returns true if NODE is a reference to an object. Keep these checks + in ascending tree code order. */ +#define TYPE_REF_OBJ_P(NODE) \ + (TYPE_REF_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) + +/* Returns true if NODE is a pointer to an object, or a pointer to + void. Keep these checks in ascending tree code order. */ +#define TYPE_PTROBV_P(NODE) \ + (TYPE_PTR_P (NODE) \ + && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (NODE))) + +/* Returns true if NODE is a pointer to function type. */ +#define TYPE_PTRFN_P(NODE) \ + (TYPE_PTR_P (NODE) \ + && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) + +/* Returns true if NODE is a reference to function type. */ +#define TYPE_REFFN_P(NODE) \ + (TYPE_REF_P (NODE) \ + && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) + +/* Returns true if NODE is a pointer to member function type. */ +#define TYPE_PTRMEMFUNC_P(NODE) \ + (TREE_CODE (NODE) == RECORD_TYPE \ + && TYPE_PTRMEMFUNC_FLAG (NODE)) + +#define TYPE_PTRMEMFUNC_FLAG(NODE) \ + (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE))) + +/* Returns true if NODE is a pointer-to-member. */ +#define TYPE_PTRMEM_P(NODE) \ + (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) + +/* Returns true if NODE is a pointer or a pointer-to-member. 
*/ +#define TYPE_PTR_OR_PTRMEM_P(NODE) \ + (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE)) + +/* Indicates when overload resolution may resolve to a pointer to + member function. [expr.unary.op]/3 */ +#define PTRMEM_OK_P(NODE) \ + TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF)) + +/* Get the POINTER_TYPE to the METHOD_TYPE associated with this + pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, + before using this macro. */ +#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \ + (cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\ + cp_type_quals (NODE))) + +/* As above, but can be used in places that want an lvalue at the expense + of not necessarily having the correct cv-qualifiers. */ +#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \ + (TREE_TYPE (TYPE_FIELDS (NODE))) + +/* Returns `A' for a type like `int (A::*)(double)' */ +#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \ + TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) + +/* The canonical internal RECORD_TYPE from the POINTER_TYPE to + METHOD_TYPE. */ +#define TYPE_PTRMEMFUNC_TYPE(NODE) \ + TYPE_LANG_SLOT_1 (NODE) + +/* For a pointer-to-member type of the form `T X::*', this is `X'. + For a type like `void (X::*)() const', this type is `X', not `const + X'. To get at the `const X' you have to look at the + TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have + type `const X*'. */ +#define TYPE_PTRMEM_CLASS_TYPE(NODE) \ + (TYPE_PTRDATAMEM_P (NODE) \ + ? TYPE_OFFSET_BASETYPE (NODE) \ + : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE)) + +/* For a pointer-to-member type of the form `T X::*', this is `T'. */ +#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \ + (TYPE_PTRDATAMEM_P (NODE) \ + ? TREE_TYPE (NODE) \ + : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) + +/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for + `X'. 
*/ +#define PTRMEM_CST_CLASS(NODE) \ + TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE))) + +/* For a pointer-to-member constant `X::Y' this is the _DECL for + `Y'. */ +#define PTRMEM_CST_MEMBER(NODE) \ + (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member) + +/* For a pointer-to-member constant `X::Y' this is a location where + the address of the member has been taken. */ +#define PTRMEM_CST_LOCATION(NODE) \ + (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->locus) + +/* The expression in question for a TYPEOF_TYPE. */ +#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE))) + +/* The type in question for an UNDERLYING_TYPE. */ +#define UNDERLYING_TYPE_TYPE(NODE) \ + (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE))) + +/* The type in question for BASES. */ +#define BASES_TYPE(NODE) \ + (TYPE_VALUES_RAW (BASES_CHECK (NODE))) + +#define BASES_DIRECT(NODE) \ + TREE_LANG_FLAG_0 (BASES_CHECK (NODE)) + +/* The expression in question for a DECLTYPE_TYPE. */ +#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE))) + +/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an + id-expression or a member-access expression. When false, it was + parsed as a full expression. */ +#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \ + (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag + +/* These flags indicate that we want different semantics from normal + decltype: lambda capture just drops references, + lambda proxies look through implicit dereference. */ +#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \ + TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE)) +#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \ + TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE)) +#define DECLTYPE_FOR_REF_CAPTURE(NODE) \ + TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE)) + +/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was + specified in its declaration. This can also be set for an + erroneously declared PARM_DECL. 
*/ +#define DECL_THIS_EXTERN(NODE) \ + DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) + +/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was + specified in its declaration. This can also be set for an + erroneously declared PARM_DECL. */ +#define DECL_THIS_STATIC(NODE) \ + DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) + +/* Nonzero for FIELD_DECL node means that this field is a lambda capture + field for an array of runtime bound. */ +#define DECL_VLA_CAPTURE_P(NODE) \ + DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE)) + +/* Nonzero for PARM_DECL node means that this is an array function + parameter, i.e, a[] rather than *a. */ +#define DECL_ARRAY_PARAMETER_P(NODE) \ + DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE)) + +/* Nonzero for a FIELD_DECL who's NSMDI is currently being + instantiated. */ +#define DECL_INSTANTIATING_NSDMI_P(NODE) \ + DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE)) + +/* Nonzero for FIELD_DECL node means that this field is a base class + of the parent object, as opposed to a member field. */ +#define DECL_FIELD_IS_BASE(NODE) \ + DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE)) + +/* Nonzero for FIELD_DECL node means that this field is a simple (no + explicit initializer) lambda capture field, making it invisible to + name lookup in unevaluated contexts. */ +#define DECL_NORMAL_CAPTURE_P(NODE) \ + DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE)) + +/* Nonzero if TYPE is an anonymous union or struct type. We have to use a + flag for this because "A union for which objects or pointers are + declared is not an anonymous union" [class.union]. */ +#define ANON_AGGR_TYPE_P(NODE) \ + (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr) +#define SET_ANON_AGGR_TYPE_P(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1) + +/* Nonzero if TYPE is an anonymous union type. 
*/ +#define ANON_UNION_TYPE_P(NODE) \ + (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE)) + +/* For an ANON_AGGR_TYPE_P the single FIELD_DECL it is used with. */ +#define ANON_AGGR_TYPE_FIELD(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) + +/* Define fields and accessors for nodes representing declared names. */ + +/* True if TYPE is an unnamed structured type with a typedef for + linkage purposes. In that case TYPE_NAME and TYPE_STUB_DECL of the + MAIN-VARIANT are different. */ +#define TYPE_WAS_UNNAMED(NODE) \ + (TYPE_NAME (TYPE_MAIN_VARIANT (NODE)) \ + != TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE))) + +/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */ + +/* The format of each node in the DECL_FRIENDLIST is as follows: + + The TREE_PURPOSE will be the name of a function, i.e., an + IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose + TREE_VALUEs are friends with the given name. */ +#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE)) +#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST)) +#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST)) + +/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of + each node is a type; the TREE_VALUE is the access granted for this + DECL in that type. The DECL_ACCESS is set by access declarations. + For example, if a member that would normally be public in a + derived class is made protected, then the derived class and the + protected_access_node will appear in the DECL_ACCESS for the node. */ +#define DECL_ACCESS(NODE) (LANG_DECL_MIN_CHECK (NODE)->access) + +/* Nonzero if the FUNCTION_DECL is a global constructor. */ +#define DECL_GLOBAL_CTOR_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->global_ctor_p) + +/* Nonzero if the FUNCTION_DECL is a global destructor. */ +#define DECL_GLOBAL_DTOR_P(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->global_dtor_p) + +/* Accessor macros for C++ template decl nodes. */ + +/* The DECL_TEMPLATE_PARMS are a list. 
The TREE_PURPOSE of each node + is a INT_CST whose TREE_INT_CST_LOW indicates the level of the + template parameters, with 1 being the outermost set of template + parameters. The TREE_VALUE is a vector, whose elements are the + template parameters at each level. Each element in the vector is a + TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a + non-type parameter), or a TYPE_DECL (if the parameter is a type + parameter) or a TEMPLATE_DECL (if the parameter is a template + parameter). The TREE_PURPOSE is the default value, if any. The + TEMPLATE_PARM_INDEX for the parameter is available as the + DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a + TYPE_DECL). + + FIXME: CONST_CAST_TREE is a hack that hopefully will go away after + tree is converted to C++ class hiearchy. */ +#define DECL_TEMPLATE_PARMS(NODE) \ + ((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments +#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \ + INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE)) +#define DECL_NTPARMS(NODE) \ + TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE)) +/* For function, method, class-data templates. + + FIXME: CONST_CAST_TREE is a hack that hopefully will go away after + tree is converted to C++ class hiearchy. */ +#define DECL_TEMPLATE_RESULT(NODE) \ + ((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result +/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS + lists all instantiations and specializations of the function so that + tsubst_friend_function can reassign them to another template if we find + that the namespace-scope template is really a partial instantiation of a + friend template. 
+ + For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds + all instantiations and specializations of the class type, including + partial instantiations and partial specializations, so that if we + explicitly specialize a partial instantiation we can walk the list + in maybe_process_partial_specialization and reassign them or complain + as appropriate. + + In both cases, the TREE_PURPOSE of each node contains the arguments + used; the TREE_VALUE contains the generated variable. The template + arguments are always complete. For example, given: + + template struct S1 { + template struct S2 {}; + template struct S2 {}; + }; + + the record for the partial specialization will contain, as its + argument list, { {T}, {U*} }, and will be on the + DECL_TEMPLATE_INSTANTIATIONS list for `template template + struct S1::S2'. + + This list is not used for other templates. */ +#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \ + DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE)) + +/* For a class template, this list contains the partial + specializations of this template. (Full specializations are not + recorded on this list.) The TREE_PURPOSE holds the arguments used + in the partial specialization (e.g., for `template struct + S' this will be `T*, int'.) The arguments will also include + any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL + for the partial specialization. The TREE_TYPE is the _TYPE node for + the partial specialization. + + This list is not used for other templates. */ +#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \ + DECL_SIZE (TEMPLATE_DECL_CHECK (NODE)) + +/* Nonzero for a DECL which is actually a template parameter. Keep + these checks in ascending tree code order. */ +#define DECL_TEMPLATE_PARM_P(NODE) \ + (DECL_LANG_FLAG_0 (NODE) \ + && (TREE_CODE (NODE) == CONST_DECL \ + || TREE_CODE (NODE) == PARM_DECL \ + || TREE_CODE (NODE) == TYPE_DECL \ + || TREE_CODE (NODE) == TEMPLATE_DECL)) + +/* Nonzero for a raw template parameter node. 
*/ +#define TEMPLATE_PARM_P(NODE) \ + (TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \ + || TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \ + || TREE_CODE (NODE) == TEMPLATE_PARM_INDEX) + +/* Mark NODE as a template parameter. */ +#define SET_DECL_TEMPLATE_PARM_P(NODE) \ + (DECL_LANG_FLAG_0 (NODE) = 1) + +/* Nonzero if NODE is a template template parameter. */ +#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ + (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) + +/* Nonzero for a DECL that represents a function template. */ +#define DECL_FUNCTION_TEMPLATE_P(NODE) \ + (TREE_CODE (NODE) == TEMPLATE_DECL \ + && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ + && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) + +/* Nonzero for a DECL that represents a class template or alias + template. */ +#define DECL_TYPE_TEMPLATE_P(NODE) \ + (TREE_CODE (NODE) == TEMPLATE_DECL \ + && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ + && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL) + +/* Nonzero for a DECL that represents a class template. */ +#define DECL_CLASS_TEMPLATE_P(NODE) \ + (DECL_TYPE_TEMPLATE_P (NODE) \ + && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE))) + +/* Nonzero for a TEMPLATE_DECL that represents an alias template. */ +#define DECL_ALIAS_TEMPLATE_P(NODE) \ + (DECL_TYPE_TEMPLATE_P (NODE) \ + && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE))) + +/* Nonzero for a NODE which declares a type. */ +#define DECL_DECLARES_TYPE_P(NODE) \ + (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE)) + +/* Nonzero if NODE declares a function. */ +#define DECL_DECLARES_FUNCTION_P(NODE) \ + (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE)) + +/* Nonzero if NODE is the typedef implicitly generated for a type when + the type is declared. In C++, `struct S {};' is roughly + equivalent to `struct S {}; typedef struct S S;' in C. + DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this + example. 
In C++, there is a second implicit typedef for each + class, called the injected-class-name, in the scope of `S' itself, so that + you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */ +#define DECL_IMPLICIT_TYPEDEF_P(NODE) \ + (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) +#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ + (DECL_LANG_FLAG_2 (NODE) = 1) +#define DECL_SELF_REFERENCE_P(NODE) \ + (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) +#define SET_DECL_SELF_REFERENCE_P(NODE) \ + (DECL_LANG_FLAG_4 (NODE) = 1) + +/* A `primary' template is one that has its own template header and is not + a partial specialization. A member function of a class template is a + template, but not primary. A member template is primary. Friend + templates are primary, too. */ + +/* Returns the primary template corresponding to these parameters. */ +#define TPARMS_PRIMARY_TEMPLATE(NODE) (TREE_TYPE (NODE)) + +#define DECL_PRIMARY_TEMPLATE(NODE) \ + (TPARMS_PRIMARY_TEMPLATE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) + +/* Returns nonzero if NODE is a primary template. */ +#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) + +/* Nonzero iff NODE is a specialization of a template. The value + indicates the type of specializations: + + 1=implicit instantiation + + 2=partial or explicit specialization, e.g.: + + template <> int min (int, int), + + 3=explicit instantiation, e.g.: + + template int min (int, int); + + Note that NODE will be marked as a specialization even if the + template it is instantiating is not a primary template. For + example, given: + + template struct O { + void f(); + struct I {}; + }; + + both O::f and O::I will be marked as instantiations. + + If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also + be non-NULL. */ +#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template) + +/* Like DECL_USE_TEMPLATE, but for class types. 
*/ +#define CLASSTYPE_USE_TEMPLATE(NODE) \ + (LANG_TYPE_CLASS_CHECK (NODE)->use_template) + +/* True if NODE is a specialization of a primary template. */ +#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ + (CLASS_TYPE_P (NODE) \ + && CLASSTYPE_USE_TEMPLATE (NODE) \ + && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) + +#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) +#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) & 1) + +#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) +#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) + +/* Returns true for an explicit or partial specialization of a class + template. */ +#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) == 2) +#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) = 2) + +#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) +#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) +#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) == 1) +#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) = 1) + +#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) +#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) +#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) == 3) +#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ + (CLASSTYPE_USE_TEMPLATE (NODE) = 3) + +/* Nonzero if DECL is a friend function which is an instantiation + from the point of view of the compiler, but not from the point of + view of the language. For example given: + template struct S { friend void f(T) {}; }; + the declaration of `void f(int)' generated when S is + instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be + a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. 
*/ +#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ + (DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \ + && !DECL_USE_TEMPLATE (DECL)) + +/* Nonzero if DECL is a function generated from a function 'temploid', + i.e. template, member of class template, or dependent friend. */ +#define DECL_TEMPLOID_INSTANTIATION(DECL) \ + (DECL_TEMPLATE_INSTANTIATION (DECL) \ + || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL)) + +/* Nonzero if DECL is either defined implicitly by the compiler or + generated from a temploid. */ +#define DECL_GENERATED_P(DECL) \ + (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL)) + +/* Nonzero iff we are currently processing a declaration for an + entity with its own template parameter list, and which is not a + full specialization. */ +#define PROCESSING_REAL_TEMPLATE_DECL_P() \ + (!processing_template_parmlist \ + && current_template_depth > template_class_depth (current_scope ())) + +/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been + instantiated, i.e. its definition has been generated from the + pattern given in the template. */ +#define DECL_TEMPLATE_INSTANTIATED(NODE) \ + DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) + +/* We know what we're doing with this decl now. */ +#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) + +/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, + so that assemble_external will work properly. So we have this flag to + tell us whether the decl is really not external. + + This flag does not indicate whether or not the decl is defined in the + current translation unit; it indicates whether or not we should emit the + decl at the end of compilation if it is defined and needed. 
*/ +#define DECL_NOT_REALLY_EXTERN(NODE) \ + (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern) + +#define DECL_REALLY_EXTERN(NODE) \ + (DECL_EXTERNAL (NODE) \ + && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE))) + +/* A thunk is a stub function. + + A thunk is an alternate entry point for an ordinary FUNCTION_DECL. + The address of the ordinary FUNCTION_DECL is given by the + DECL_INITIAL, which is always an ADDR_EXPR whose operand is a + FUNCTION_DECL. The job of the thunk is to either adjust the this + pointer before transferring control to the FUNCTION_DECL, or call + FUNCTION_DECL and then adjust the result value. Note, the result + pointer adjusting thunk must perform a call to the thunked + function, (or be implemented via passing some invisible parameter + to the thunked function, which is modified to perform the + adjustment just before returning). + + A thunk may perform either, or both, of the following operations: + + o Adjust the this or result pointer by a constant offset. + o Adjust the this or result pointer by looking up a vcall or vbase offset + in the vtable. + + A this pointer adjusting thunk converts from a base to a derived + class, and hence adds the offsets. A result pointer adjusting thunk + converts from a derived class to a base, and hence subtracts the + offsets. If both operations are performed, then the constant + adjustment is performed first for this pointer adjustment and last + for the result pointer adjustment. + + The constant adjustment is given by THUNK_FIXED_OFFSET. If the + vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is + used. For this pointer adjusting thunks, it is the vcall offset + into the vtable. For result pointer adjusting thunks it is the + binfo of the virtual base to convert to. Use that binfo's vbase + offset. + + It is possible to have equivalent covariant thunks. These are + distinct virtual covariant thunks whose vbase offsets happen to + have the same value. 
THUNK_ALIAS is used to pick one as the + canonical thunk, which will get all the this pointer adjusting + thunks attached to it. */ + +/* An integer indicating how many bytes should be subtracted from the + this or result pointer when this function is called. */ +#define THUNK_FIXED_OFFSET(DECL) \ + (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset) + +/* A tree indicating how to perform the virtual adjustment. For a this + adjusting thunk it is the number of bytes to be added to the vtable + to find the vcall offset. For a result adjusting thunk, it is the + binfo of the relevant virtual base. If NULL, then there is no + virtual adjust. (The vptr is always located at offset zero from + the this or result pointer.) (If the covariant type is within the + class hierarchy being laid out, the vbase index is not yet known + at the point we need to create the thunks, hence the need to use + binfos.) */ + +#define THUNK_VIRTUAL_OFFSET(DECL) \ + (LANG_DECL_MIN_CHECK (FUNCTION_DECL_CHECK (DECL))->access) + +/* A thunk which is equivalent to another thunk. */ +#define THUNK_ALIAS(DECL) \ + (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info) + +/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is + possible for the target to be a thunk too. */ +#define THUNK_TARGET(NODE) \ + (LANG_DECL_FN_CHECK (NODE)->befriending_classes) + +/* True for a SCOPE_REF iff the "template" keyword was used to + indicate that the qualified name denotes a template. */ +#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \ + (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE))) + +/* [coroutines] +*/ + +/* True if NODE is a co-routine FUNCTION_DECL. */ +#define DECL_COROUTINE_P(NODE) \ + (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->coroutine_p) + +/* For a FUNCTION_DECL of a coroutine, this holds the ACTOR helper function + decl. 
*/ +#define DECL_ACTOR_FN(NODE) \ + (coro_get_actor_function ((NODE))) + +/* For a FUNCTION_DECL of a coroutine, this holds the DESTROY helper function + decl. */ +#define DECL_DESTROY_FN(NODE) \ + (coro_get_destroy_function ((NODE))) + +/* For a FUNCTION_DECL of a coroutine helper (ACTOR or DESTROY), this points + back to the original (ramp) function. */ +#define DECL_RAMP_FN(NODE) \ + (coro_get_ramp_function (NODE)) + +/* True for an OMP_ATOMIC that has dependent parameters. These are stored + as an expr in operand 1, and integer_zero_node or clauses in operand 0. */ +#define OMP_ATOMIC_DEPENDENT_P(NODE) \ + (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST \ + || TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == OMP_CLAUSE) + +/* Used while gimplifying continue statements bound to OMP_FOR nodes. */ +#define OMP_FOR_GIMPLIFYING_P(NODE) \ + (TREE_LANG_FLAG_0 (OMP_LOOPING_CHECK (NODE))) + +/* A language-specific token attached to the OpenMP data clauses to + hold code (or code fragments) related to ctors, dtors, and op=. + See semantics.cc for details. */ +#define CP_OMP_CLAUSE_INFO(NODE) \ + TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \ + OMP_CLAUSE__CONDTEMP_)) + +/* Nonzero if this transaction expression's body contains statements. */ +#define TRANSACTION_EXPR_IS_STMT(NODE) \ + TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE)) + +/* These macros provide convenient access to the various _STMT nodes + created when parsing template declarations. */ +#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0) +#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1) + +#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0) +#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1) + +#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0) + +/* Nonzero if this try block is a function try block. 
*/ +#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE)) +#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0) +#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1) +#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE)) + +/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run + and the VAR_DECL for which this cleanup exists. */ +#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0) +#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1) +#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2) + +/* IF_STMT accessors. These give access to the condition of the if + statement, the then block of the if statement, and the else block + of the if statement if it exists. */ +#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0) +#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1) +#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2) +#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3) +#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE)) +#define IF_STMT_CONSTEVAL_P(NODE) TREE_LANG_FLAG_2 (IF_STMT_CHECK (NODE)) + +/* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. IF_SCOPE is used while + building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */ +#define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE) + +/* RANGE_FOR_STMT accessors. These give access to the declarator, + expression, body, and scope of the statement, respectively. 
*/ +#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0) +#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1) +#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2) +#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3) +#define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4) +#define RANGE_FOR_INIT_STMT(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 5) +#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE)) + +/* STMT_EXPR accessor. */ +#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) + +/* EXPR_STMT accessor. This gives the expression associated with an + expression statement. */ +#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) + +/* True if this TARGET_EXPR was created by build_cplus_new, and so we can + discard it if it isn't useful. */ +#define TARGET_EXPR_IMPLICIT_P(NODE) \ + TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE)) + +/* True if this TARGET_EXPR is the result of list-initialization of a + temporary. */ +#define TARGET_EXPR_LIST_INIT_P(NODE) \ + TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE)) + +/* True if this TARGET_EXPR expresses direct-initialization of an object + to be named later. */ +#define TARGET_EXPR_DIRECT_INIT_P(NODE) \ + TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE)) + +/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if + the initializer has void type, it's doing something more complicated. */ +#define SIMPLE_TARGET_EXPR_P(NODE) \ + (TREE_CODE (NODE) == TARGET_EXPR \ + && TARGET_EXPR_INITIAL (NODE) \ + && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE)))) + +/* True if T is a TARGET_EXPR for which we'll need to replace_decl to use it as + an initializer. 
*/ +inline bool +target_expr_needs_replace (tree t) +{ + if (!t || TREE_CODE (t) != TARGET_EXPR) + return false; + tree init = TARGET_EXPR_INITIAL (t); + if (!init || !VOID_TYPE_P (TREE_TYPE (init))) + return false; + while (TREE_CODE (init) == COMPOUND_EXPR) + init = TREE_OPERAND (init, 1); + return TREE_CODE (init) != AGGR_INIT_EXPR; +} + +/* True if EXPR expresses direct-initialization of a TYPE. */ +#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \ + (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \ + && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR))) + +/* True if this CONVERT_EXPR is for a conversion to virtual base in + an NSDMI, and should be re-evaluated when used in a constructor. */ +#define CONVERT_EXPR_VBASE_PATH(NODE) \ + TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE)) + +/* True if SIZEOF_EXPR argument is type. */ +#define SIZEOF_EXPR_TYPE_P(NODE) \ + TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE)) + +/* True if the ALIGNOF_EXPR was spelled "alignof". */ +#define ALIGNOF_EXPR_STD_P(NODE) \ + TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE)) + +/* OMP_DEPOBJ accessors. These give access to the depobj expression of the + #pragma omp depobj directive and the clauses, respectively. If + OMP_DEPOBJ_CLAUSES is INTEGER_CST, it is instead the update clause kind + or OMP_CLAUSE_DEPEND_LAST for destroy clause. */ +#define OMP_DEPOBJ_DEPOBJ(NODE) TREE_OPERAND (OMP_DEPOBJ_CHECK (NODE), 0) +#define OMP_DEPOBJ_CLAUSES(NODE) TREE_OPERAND (OMP_DEPOBJ_CHECK (NODE), 1) + +/* An enumeration of the kind of tags that C++ accepts. */ +enum tag_types { + none_type = 0, /* Not a tag type. */ + record_type, /* "struct" types. */ + class_type, /* "class" types. */ + union_type, /* "union" types. */ + enum_type, /* "enum" types. */ + typename_type, /* "typename" types. */ + scope_type /* namespace or tagged type name followed by :: */ +}; + +/* The various kinds of lvalues we distinguish. 
*/ +enum cp_lvalue_kind_flags { + clk_none = 0, /* Things that are not an lvalue. */ + clk_ordinary = 1, /* An ordinary lvalue. */ + clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */ + clk_class = 4, /* A prvalue of class or array type. */ + clk_bitfield = 8, /* An lvalue for a bit-field. */ + clk_packed = 16, /* An lvalue for a packed field. */ + clk_implicit_rval = 1<<5 /* An lvalue being treated as an xvalue. */ +}; + +/* This type is used for parameters and variables which hold + combinations of the flags in enum cp_lvalue_kind_flags. */ +typedef int cp_lvalue_kind; + +/* Various kinds of template specialization, instantiation, etc. */ +enum tmpl_spec_kind { + tsk_none, /* Not a template at all. */ + tsk_invalid_member_spec, /* An explicit member template + specialization, but the enclosing + classes have not all been explicitly + specialized. */ + tsk_invalid_expl_inst, /* An explicit instantiation containing + template parameter lists. */ + tsk_excessive_parms, /* A template declaration with too many + template parameter lists. */ + tsk_insufficient_parms, /* A template declaration with too few + parameter lists. */ + tsk_template, /* A template declaration. */ + tsk_expl_spec, /* An explicit specialization. */ + tsk_expl_inst /* An explicit instantiation. */ +}; + +/* The various kinds of access. BINFO_ACCESS depends on these being + two bit quantities. The numerical values are important; they are + used to initialize RTTI data structures, so changing them changes + the ABI. */ +enum access_kind { + ak_none = 0, /* Inaccessible. */ + ak_public = 1, /* Accessible, as a `public' thing. */ + ak_protected = 2, /* Accessible, as a `protected' thing. */ + ak_private = 3 /* Accessible, as a `private' thing. */ +}; + +/* The various kinds of special functions. If you add to this list, + you should update special_function_p as well. */ +enum special_function_kind { + sfk_none = 0, /* Not a special function. 
This enumeral + must have value zero; see + special_function_p. */ + /* The following are ordered, for use by member synthesis fns. */ + sfk_destructor, /* A destructor. */ + sfk_constructor, /* A constructor. */ + sfk_inheriting_constructor, /* An inheriting constructor */ + sfk_copy_constructor, /* A copy constructor. */ + sfk_move_constructor, /* A move constructor. */ + sfk_copy_assignment, /* A copy assignment operator. */ + sfk_move_assignment, /* A move assignment operator. */ + /* The following are unordered. */ + sfk_complete_destructor, /* A destructor for complete objects. */ + sfk_base_destructor, /* A destructor for base subobjects. */ + sfk_deleting_destructor, /* A destructor for complete objects that + deletes the object after it has been + destroyed. */ + sfk_conversion, /* A conversion operator. */ + sfk_deduction_guide, /* A class template deduction guide. */ + sfk_comparison, /* A comparison operator (e.g. ==, <, <=>). */ + sfk_virtual_destructor /* Used by member synthesis fns. */ +}; + +/* The various kinds of linkage. From [basic.link], + + A name is said to have linkage when it might denote the same + object, reference, function, type, template, namespace or value + as a name introduced in another scope: + + -- When a name has external linkage, the entity it denotes can + be referred to from scopes of other translation units or from + other scopes of the same translation unit. + + -- When a name has internal linkage, the entity it denotes can + be referred to by names from other scopes in the same + translation unit. + + -- When a name has no linkage, the entity it denotes cannot be + referred to by names from other scopes. */ + +enum linkage_kind { + lk_none, /* No linkage. */ + lk_internal, /* Internal linkage. */ + lk_external /* External linkage. */ +}; + +enum duration_kind { + dk_static, + dk_thread, + dk_auto, + dk_dynamic +}; + +/* Bitmask flags to control type substitution. 
*/ +enum tsubst_flags { + tf_none = 0, /* nothing special */ + tf_error = 1 << 0, /* give error messages */ + tf_warning = 1 << 1, /* give warnings too */ + tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */ + tf_keep_type_decl = 1 << 3, /* retain typedef type decls + (make_typename_type use) */ + tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal + instantiate_type use) */ + tf_user = 1 << 5, /* found template must be a user template + (lookup_template_class use) */ + tf_conv = 1 << 6, /* We are determining what kind of + conversion might be permissible, + not actually performing the + conversion. */ + tf_decltype = 1 << 7, /* We are the operand of decltype. + Used to implement the special rules + for calls in decltype (5.2.2/11). */ + tf_partial = 1 << 8, /* Doing initial explicit argument + substitution in fn_type_unification. */ + tf_fndecl_type = 1 << 9, /* Substituting the type of a function + declaration. */ + tf_no_cleanup = 1 << 10, /* Do not build a cleanup + (build_target_expr and friends) */ + tf_norm = 1 << 11, /* Build diagnostic information during + constraint normalization. */ + tf_tst_ok = 1 << 12, /* Allow a typename-specifier to name + a template (C++17 or later). */ + tf_dguide = 1 << 13, /* Building a deduction guide from a ctor. */ + /* Convenient substitution flags combinations. */ + tf_warning_or_error = tf_warning | tf_error +}; + +/* This type is used for parameters and variables which hold + combinations of the flags in enum tsubst_flags. */ +typedef int tsubst_flags_t; + +/* The kind of checking we can do looking in a class hierarchy. */ +enum base_access_flags { + ba_any = 0, /* Do not check access, allow an ambiguous base, + prefer a non-virtual base */ + ba_unique = 1 << 0, /* Must be a unique base. */ + ba_check_bit = 1 << 1, /* Check access. */ + ba_check = ba_unique | ba_check_bit, + ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. 
*/ +}; + +/* This type is used for parameters and variables which hold + combinations of the flags in enum base_access_flags. */ +typedef int base_access; + +/* The various kinds of access check during parsing. */ +enum deferring_kind { + dk_no_deferred = 0, /* Check access immediately */ + dk_deferred = 1, /* Deferred check */ + dk_no_check = 2 /* No access check */ +}; + +/* The kind of base we can find, looking in a class hierarchy. + Values <0 indicate we failed. */ +enum base_kind { + bk_inaccessible = -3, /* The base is inaccessible */ + bk_ambig = -2, /* The base is ambiguous */ + bk_not_base = -1, /* It is not a base */ + bk_same_type = 0, /* It is the same type */ + bk_proper_base = 1, /* It is a proper base */ + bk_via_virtual = 2 /* It is a proper base, but via a virtual + path. This might not be the canonical + binfo. */ +}; + +/* Node for "pointer to (virtual) function". + This may be distinct from ptr_type_node so gdb can distinguish them. */ +#define vfunc_ptr_type_node vtable_entry_type + + +/* For building calls to `delete'. */ +extern GTY(()) tree integer_two_node; + +/* The number of function bodies which we are currently processing. + (Zero if we are at namespace scope, one inside the body of a + function, two inside the body of a function in a local class, etc.) */ +extern int function_depth; + +/* Nonzero if we are inside spec_hasher::equal, which affects + comparison of PARM_DECLs in cp_tree_equal. */ +extern int comparing_specializations; + +/* Nonzero if we want different dependent aliases to compare as unequal. + FIXME we should always do this except during deduction/ordering. */ +extern int comparing_dependent_aliases; + +/* In parser.cc. */ + +/* Nonzero if we are parsing an unevaluated operand: an operand to + sizeof, typeof, or alignof. This is a count since operands to + sizeof can be nested. 
*/ + +extern int cp_unevaluated_operand; + +/* RAII class used to inhibit the evaluation of operands during parsing + and template instantiation. Evaluation warnings are also inhibited. */ + +class cp_unevaluated +{ +public: + cp_unevaluated (); + ~cp_unevaluated (); +}; + +/* The reverse: an RAII class used for nested contexts that are evaluated even + if the enclosing context is not. */ + +class cp_evaluated +{ +public: + int uneval; + int inhibit; + cp_evaluated (bool reset = true) + : uneval(cp_unevaluated_operand), inhibit(c_inhibit_evaluation_warnings) + { if (reset) + cp_unevaluated_operand = c_inhibit_evaluation_warnings = 0; } + ~cp_evaluated () + { cp_unevaluated_operand = uneval; + c_inhibit_evaluation_warnings = inhibit; } +}; + +/* in pt.cc */ + +/* These values are used for the `STRICT' parameter to type_unification and + fn_type_unification. Their meanings are described with the + documentation for fn_type_unification. */ + +enum unification_kind_t { + DEDUCE_CALL, + DEDUCE_CONV, + DEDUCE_EXACT +}; + +// An RAII class used to create a new pointer map for local +// specializations. When the stack goes out of scope, the +// previous pointer map is restored. +enum lss_policy { lss_blank, lss_copy, lss_nop }; +class local_specialization_stack +{ +public: + local_specialization_stack (lss_policy = lss_blank); + ~local_specialization_stack (); + + hash_map *saved; +}; + +/* Entry in the specialization hash table. */ +struct GTY((for_user)) spec_entry +{ + tree tmpl; /* The general template this is a specialization of. */ + tree args; /* The args for this (maybe-partial) specialization. */ + tree spec; /* The specialization itself. */ +}; + +/* in class.cc */ + +extern int current_class_depth; + +/* in decl.cc */ + +/* An array of static vars & fns. */ +extern GTY(()) vec *static_decls; + +/* An array of vtable-needing types that have no key function, or have + an emitted key function. 
*/ +extern GTY(()) vec *keyed_classes; + +/* Here's where we control how name mangling takes place. */ + +/* Cannot use '$' up front, because this confuses gdb + (names beginning with '$' are gdb-local identifiers). + + Note that all forms in which the '$' is significant are long enough + for direct indexing (meaning that if we know there is a '$' + at a particular location, we can index into the string at + any other location that provides distinguishing characters). */ + +/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler + doesn't allow '.' in symbol names. */ +#ifndef NO_DOT_IN_LABEL + +#define JOINER '.' +#define JOIN_STR "." + +#define AUTO_TEMP_NAME "_.tmp_" +#define VFIELD_BASE ".vf" +#define VFIELD_NAME "_vptr." +#define VFIELD_NAME_FORMAT "_vptr.%s" + +#else /* NO_DOT_IN_LABEL */ + +#ifndef NO_DOLLAR_IN_LABEL + +#define JOINER '$' +#define JOIN_STR "$" + +#define AUTO_TEMP_NAME "_$tmp_" +#define VFIELD_BASE "$vf" +#define VFIELD_NAME "_vptr$" +#define VFIELD_NAME_FORMAT "_vptr$%s" + +#else /* NO_DOLLAR_IN_LABEL */ + +#define JOIN_STR "_" + +#define VTABLE_NAME "__vt_" +#define VTABLE_NAME_P(ID_NODE) \ + (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \ + sizeof (VTABLE_NAME) - 1)) +#define VFIELD_BASE "__vfb" +#define VFIELD_NAME "__vptr_" +#define VFIELD_NAME_P(ID_NODE) \ + (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \ + sizeof (VFIELD_NAME) - 1)) +#define VFIELD_NAME_FORMAT "__vptr_%s" + +#endif /* NO_DOLLAR_IN_LABEL */ +#endif /* NO_DOT_IN_LABEL */ + +#define UDLIT_OP_ANSI_PREFIX "operator\"\"" +#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s" +#define UDLIT_OP_MANGLED_PREFIX "li" +#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s" +#define UDLIT_OPER_P(ID_NODE) \ + (!strncmp (IDENTIFIER_POINTER (ID_NODE), \ + UDLIT_OP_ANSI_PREFIX, \ + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)) +#define UDLIT_OP_SUFFIX(ID_NODE) \ + (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1) + +#if 
!defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) + +#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \ + && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \ + && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER) + +#define VFIELD_NAME_P(ID_NODE) \ + (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1)) + +#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */ + + +/* Nonzero if we're done parsing and into end-of-file activities. + Two if we're done with front-end processing. */ + +extern int at_eof; + +/* True if note_mangling_alias should enqueue mangling aliases for + later generation, rather than emitting them right away. */ + +extern bool defer_mangling_aliases; + +/* True if noexcept is part of the type (i.e. in C++17). */ + +extern bool flag_noexcept_type; + +/* A list of namespace-scope objects which have constructors or + destructors which reside in the global scope. The decl is stored + in the TREE_VALUE slot and the initializer is stored in the + TREE_PURPOSE slot. */ +extern GTY(()) tree static_aggregates; +/* Likewise, for thread local storage. */ +extern GTY(()) tree tls_aggregates; + +/* A hash-map mapping from variable decls to the dynamic initializer for + the decl. This is currently only used by OpenMP. */ +extern GTY(()) decl_tree_map *dynamic_initializers; + +enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG }; + +/* These are uses as bits in flags passed to various functions to + control their behavior. Despite the LOOKUP_ prefix, many of these + do not control name lookup. ??? Functions using these flags should + probably be modified to accept explicit boolean flags for the + behaviors relevant to them. */ +/* Check for access violations. */ +#define LOOKUP_PROTECT (1 << 0) +#define LOOKUP_NORMAL (LOOKUP_PROTECT) +/* Even if the function found by lookup is a virtual function, it + should be called directly. 
*/ +#define LOOKUP_NONVIRTUAL (1 << 1) +/* Non-converting (i.e., "explicit") constructors are not tried. This flag + indicates that we are not performing direct-initialization. */ +#define LOOKUP_ONLYCONVERTING (1 << 2) +#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING) +/* If a temporary is created, it should be created so that it lives + as long as the current variable bindings; otherwise it only lives + until the end of the complete-expression. It also forces + direct-initialization in cases where other parts of the compiler + have already generated a temporary, such as reference + initialization and the catch parameter. */ +#define DIRECT_BIND (1 << 3) +/* We're performing a user-defined conversion, so more user-defined + conversions are not permitted (only built-in conversions). */ +#define LOOKUP_NO_CONVERSION (1 << 4) +/* The user has explicitly called a destructor. (Therefore, we do + not need to check that the object is non-NULL before calling the + destructor.) */ +#define LOOKUP_DESTRUCTOR (1 << 5) +/* Do not permit references to bind to temporaries. */ +#define LOOKUP_NO_TEMP_BIND (1 << 6) +/* We're trying to treat an lvalue as an rvalue. */ +/* FIXME remove when we extend the P1825 semantics to all standard modes, the + C++20 approach uses IMPLICIT_RVALUE_P instead. */ +#define LOOKUP_PREFER_RVALUE (LOOKUP_NO_TEMP_BIND << 1) +/* We're inside an init-list, so narrowing conversions are ill-formed. */ +#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1) +/* We're looking up a constructor for list-initialization. */ +#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1) +/* This is the first parameter of a copy constructor. */ +#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1) +/* We only want to consider list constructors. */ +#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1) +/* Return after determining which function to call and checking access. 
+ Used by sythesized_method_walk to determine which functions will + be called to initialize subobjects, in order to determine exception + specification and possible implicit delete. + This is kind of a hack, but exiting early avoids problems with trying + to perform argument conversions when the class isn't complete yet. */ +#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1) +/* Used by calls from defaulted functions to limit the overload set to avoid + cycles trying to declare them (core issue 1092). */ +#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1) +/* Used in calls to store_init_value to suppress its usual call to + digest_init. */ +#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1) +/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */ +#define LOOKUP_NO_RVAL_BIND (LOOKUP_ALREADY_DIGESTED << 1) +/* Used by case_conversion to disregard non-integral conversions. */ +#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1) +/* Used for delegating constructors in order to diagnose self-delegation. */ +#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1) +/* Allow initialization of a flexible array members. */ +#define LOOKUP_ALLOW_FLEXARRAY_INIT (LOOKUP_DELEGATING_CONS << 1) +/* We're looking for either a rewritten comparison operator candidate or the + operator to use on the former's result. We distinguish between the two by + knowing that comparisons other than == and <=> must be the latter, as must + a <=> expression trying to rewrite to <=> without reversing. */ +#define LOOKUP_REWRITTEN (LOOKUP_ALLOW_FLEXARRAY_INIT << 1) +/* Reverse the order of the two arguments for comparison rewriting. First we + swap the arguments in add_operator_candidates, then we swap the conversions + in add_candidate (so that they correspond to the original order of the + args), then we swap the conversions back in build_new_op_1 (so they + correspond to the order of the args in the candidate). 
*/ +#define LOOKUP_REVERSED (LOOKUP_REWRITTEN << 1) +/* We're initializing an aggregate from a parenthesized list of values. */ +#define LOOKUP_AGGREGATE_PAREN_INIT (LOOKUP_REVERSED << 1) + +/* These flags are used by the conversion code. + CONV_IMPLICIT : Perform implicit conversions (standard and user-defined). + CONV_STATIC : Perform the explicit conversions for static_cast. + CONV_CONST : Perform the explicit conversions for const_cast. + CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast. + CONV_PRIVATE : Perform upcasts to private bases. + CONV_FORCE_TEMP : Require a new temporary when converting to the same + aggregate type. */ + +#define CONV_IMPLICIT 1 +#define CONV_STATIC 2 +#define CONV_CONST 4 +#define CONV_REINTERPRET 8 +#define CONV_PRIVATE 16 +#define CONV_FORCE_TEMP 32 +#define CONV_FOLD 64 +#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ + | CONV_REINTERPRET) +#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ + | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP) +#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD) + +/* Used by build_expr_type_conversion to indicate which types are + acceptable as arguments to the expression under consideration. */ + +#define WANT_INT 1 /* integer types, including bool */ +#define WANT_FLOAT 2 /* floating point types */ +#define WANT_ENUM 4 /* enumerated types */ +#define WANT_POINTER 8 /* pointer types */ +#define WANT_NULL 16 /* null pointer constant */ +#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */ +#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX) + +/* Used with comptypes, and related functions, to guide type + comparison. */ + +#define COMPARE_STRICT 0 /* Just check if the types are the + same. */ +#define COMPARE_BASE 1 /* Check to see if the second type is + derived from the first. */ +#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in + reverse. 
*/ +#define COMPARE_REDECLARATION 4 /* The comparison is being done when + another declaration of an existing + entity is seen. */ +#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be + structural. The actual comparison + will be identical to + COMPARE_STRICT. */ + +/* Used with start function. */ +#define SF_DEFAULT 0 /* No flags. */ +#define SF_PRE_PARSED 1 /* The function declaration has + already been parsed. */ +#define SF_INCLASS_INLINE 2 /* The function is an inline, defined + in the class body. */ + +/* Used with start_decl's initialized parameter. */ +#define SD_UNINITIALIZED 0 +#define SD_INITIALIZED 1 +/* Like SD_INITIALIZED, but also mark the new decl as DECL_DECOMPOSITION_P. */ +#define SD_DECOMPOSITION 2 +#define SD_DEFAULTED 3 +#define SD_DELETED 4 + +/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2 + is derived from TYPE1, or if TYPE2 is a pointer (reference) to a + class derived from the type pointed to (referred to) by TYPE1. */ +#define same_or_base_type_p(TYPE1, TYPE2) \ + comptypes ((TYPE1), (TYPE2), COMPARE_BASE) + +/* These macros are used to access a TEMPLATE_PARM_INDEX. */ +#define TEMPLATE_PARM_INDEX_CAST(NODE) \ + ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE)) +#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index) +#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level) +#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE)) +#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level) +#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl) +#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \ + (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE))) + +/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM, + TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. 
*/ +#define TEMPLATE_TYPE_PARM_INDEX(NODE) \ + (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \ + TEMPLATE_TEMPLATE_PARM, \ + BOUND_TEMPLATE_TEMPLATE_PARM))) +#define TEMPLATE_TYPE_IDX(NODE) \ + (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE))) +#define TEMPLATE_TYPE_LEVEL(NODE) \ + (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) +#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \ + (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) +#define TEMPLATE_TYPE_DECL(NODE) \ + (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE))) +#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \ + (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE))) + +/* For a C++17 class deduction placeholder, the template it represents. */ +#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \ + (DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE)))) + +/* Contexts in which auto deduction occurs. These flags are + used to control diagnostics in do_auto_deduction. */ + +enum auto_deduction_context +{ + adc_unspecified, /* Not given */ + adc_variable_type, /* Variable initializer deduction */ + adc_return_type, /* Return type deduction */ + adc_unify, /* Template argument deduction */ + adc_requirement, /* Argument deduction constraint */ + adc_decomp_type /* Decomposition declaration initializer deduction */ +}; + +/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */ +#define AUTO_IS_DECLTYPE(NODE) \ + (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE))) + +/* These constants can used as bit flags in the process of tree formatting. + + TFF_PLAIN_IDENTIFIER: unqualified part of a name. + TFF_SCOPE: include the class and namespace scope of the name. + TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name. + TFF_DECL_SPECIFIERS: print decl-specifiers. + TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with + a class-key (resp. `enum'). + TFF_RETURN_TYPE: include function return type. 
+ TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values. + TFF_EXCEPTION_SPECIFICATION: show function exception specification. + TFF_TEMPLATE_HEADER: show the template<...> header in a + template-declaration. + TFF_TEMPLATE_NAME: show only template-name. + TFF_EXPR_IN_PARENS: parenthesize expressions. + TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments. + TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the + top-level entity. + TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments + identical to their defaults. + TFF_NO_TEMPLATE_BINDINGS: do not print information about the template + arguments for a function template specialization. + TFF_POINTER: we are printing a pointer type. */ + +#define TFF_PLAIN_IDENTIFIER (0) +#define TFF_SCOPE (1) +#define TFF_CHASE_TYPEDEF (1 << 1) +#define TFF_DECL_SPECIFIERS (1 << 2) +#define TFF_CLASS_KEY_OR_ENUM (1 << 3) +#define TFF_RETURN_TYPE (1 << 4) +#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5) +#define TFF_EXCEPTION_SPECIFICATION (1 << 6) +#define TFF_TEMPLATE_HEADER (1 << 7) +#define TFF_TEMPLATE_NAME (1 << 8) +#define TFF_EXPR_IN_PARENS (1 << 9) +#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10) +#define TFF_UNQUALIFIED_NAME (1 << 11) +#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12) +#define TFF_NO_TEMPLATE_BINDINGS (1 << 13) +#define TFF_POINTER (1 << 14) + +/* These constants can be used as bit flags to control strip_typedefs. + + STF_USER_VISIBLE: use heuristics to try to avoid stripping user-facing + aliases of internal details. This is intended for diagnostics, + where it should (for example) give more useful "aka" types. + + STF_STRIP_DEPENDENT: allow the stripping of aliases with dependent + template parameters, relying on code elsewhere to report any + appropriate diagnostics. */ +const unsigned int STF_USER_VISIBLE = 1U; +const unsigned int STF_STRIP_DEPENDENT = 1U << 1; + +/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM + node. 
*/ +#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \ + ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \ + ? TYPE_TI_TEMPLATE (NODE) \ + : TYPE_NAME (NODE)) + +/* in lex.cc */ + +extern void init_reswords (void); + +/* Various flags for the overloaded operator information. */ +enum ovl_op_flags { + OVL_OP_FLAG_NONE = 0, /* Don't care. */ + OVL_OP_FLAG_UNARY = 1, /* Is unary. */ + OVL_OP_FLAG_BINARY = 2, /* Is binary. */ + OVL_OP_FLAG_AMBIARY = 3, /* May be unary or binary. */ + OVL_OP_FLAG_ALLOC = 4, /* operator new or delete. */ + OVL_OP_FLAG_DELETE = 1, /* operator delete. */ + OVL_OP_FLAG_VEC = 2 /* vector new or delete. */ +}; + +/* Compressed operator codes. Order is determined by operators.def + and does not match that of tree_codes. */ +enum ovl_op_code { + OVL_OP_ERROR_MARK, + OVL_OP_NOP_EXPR, +#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE, +#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */ +#include "operators.def" + OVL_OP_MAX +}; + +/* Make sure it fits in lang_decl_fn::ovl_op_code. */ +STATIC_ASSERT (OVL_OP_MAX < (1 << 6)); + +struct GTY(()) ovl_op_info_t { + /* The IDENTIFIER_NODE for the operator. */ + tree identifier; + /* The name of the operator. */ + const char *name; + /* The mangled name of the operator. */ + const char *mangled_name; + /* The (regular) tree code. */ + enum tree_code tree_code : 16; + /* The (compressed) operator code. */ + enum ovl_op_code ovl_op_code : 8; + /* The ovl_op_flags of the operator */ + unsigned flags : 8; +}; + +/* Overloaded operator info indexed by ass_op_p & ovl_op_code. */ +extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX]; +/* Mapping from tree_codes to ovl_op_codes. */ +extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES]; +/* Mapping for ambi-ary operators from the binary to the unary. */ +extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX]; + +/* Given an ass_op_p boolean and a tree code, return a pointer to its + overloaded operator info. 
Tree codes for non-overloaded operators + map to the error-operator. */ +#define OVL_OP_INFO(IS_ASS_P, TREE_CODE) \ + (&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]]) +/* Overloaded operator info for an identifier for which + IDENTIFIER_OVL_OP_P is true. */ +#define IDENTIFIER_OVL_OP_INFO(NODE) \ + (&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)]) +#define IDENTIFIER_OVL_OP_FLAGS(NODE) \ + (IDENTIFIER_OVL_OP_INFO (NODE)->flags) + +inline tree ovl_op_identifier (bool isass, tree_code code) +{ return OVL_OP_INFO(isass, code)->identifier; } +inline tree ovl_op_identifier (tree_code code) { return ovl_op_identifier (false, code); } +#define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier) +#define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier) + +/* A type-qualifier, or bitmask therefore, using the TYPE_QUAL + constants. */ + +typedef int cp_cv_quals; + +/* Non-static member functions have an optional virt-specifier-seq. + There is a VIRT_SPEC value for each virt-specifier. + They can be combined by bitwise-or to form the complete set of + virt-specifiers for a member function. */ +enum virt_specifier + { + VIRT_SPEC_UNSPECIFIED = 0x0, + VIRT_SPEC_FINAL = 0x1, + VIRT_SPEC_OVERRIDE = 0x2 + }; + +/* A type-qualifier, or bitmask therefore, using the VIRT_SPEC + constants. */ + +typedef int cp_virt_specifiers; + +/* Wherever there is a function-cv-qual, there could also be a ref-qualifier: + + [dcl.fct] + The return type, the parameter-type-list, the ref-qualifier, and + the cv-qualifier-seq, but not the default arguments or the exception + specification, are part of the function type. + + REF_QUAL_NONE Ordinary member function with no ref-qualifier + REF_QUAL_LVALUE Member function with the &-ref-qualifier + REF_QUAL_RVALUE Member function with the &&-ref-qualifier */ + +enum cp_ref_qualifier { + REF_QUAL_NONE = 0, + REF_QUAL_LVALUE = 1, + REF_QUAL_RVALUE = 2 +}; + +/* A storage class. 
*/ + +enum cp_storage_class { + /* sc_none must be zero so that zeroing a cp_decl_specifier_seq + sets the storage_class field to sc_none. */ + sc_none = 0, + sc_auto, + sc_register, + sc_static, + sc_extern, + sc_mutable +}; + +/* An individual decl-specifier. This is used to index the array of + locations for the declspecs in struct cp_decl_specifier_seq + below. */ + +enum cp_decl_spec { + ds_first, + ds_signed = ds_first, + ds_unsigned, + ds_short, + ds_long, + ds_const, + ds_volatile, + ds_restrict, + ds_inline, + ds_virtual, + ds_explicit, + ds_friend, + ds_typedef, + ds_alias, + ds_constexpr, + ds_complex, + ds_constinit, + ds_consteval, + ds_thread, + ds_type_spec, + ds_redefined_builtin_type_spec, + ds_attribute, + ds_std_attribute, + ds_storage_class, + ds_long_long, + ds_concept, + ds_last /* This enumerator must always be the last one. */ +}; + +/* A decl-specifier-seq. */ + +struct cp_decl_specifier_seq { + /* An array of locations for the declaration sepecifiers, indexed by + enum cp_decl_spec_word. */ + location_t locations[ds_last]; + /* The primary type, if any, given by the decl-specifier-seq. + Modifiers, like "short", "const", and "unsigned" are not + reflected here. This field will be a TYPE, unless a typedef-name + was used, in which case it will be a TYPE_DECL. */ + tree type; + /* The attributes, if any, provided with the specifier sequence. */ + tree attributes; + /* The c++11 attributes that follows the type specifier. */ + tree std_attributes; + /* If non-NULL, a built-in type that the user attempted to redefine + to some other type. */ + tree redefined_builtin_type; + /* The explicit-specifier, if any. */ + tree explicit_specifier; + /* The storage class specified -- or sc_none if no storage class was + explicitly specified. */ + cp_storage_class storage_class; + /* For the __intN declspec, this stores the index into the int_n_* arrays. */ + int int_n_idx; + /* True iff TYPE_SPEC defines a class or enum. 
*/ + BOOL_BITFIELD type_definition_p : 1; + /* True iff multiple types were (erroneously) specified for this + decl-specifier-seq. */ + BOOL_BITFIELD multiple_types_p : 1; + /* True iff multiple storage classes were (erroneously) specified + for this decl-specifier-seq or a combination of a storage class + with a typedef specifier. */ + BOOL_BITFIELD conflicting_specifiers_p : 1; + /* True iff at least one decl-specifier was found. */ + BOOL_BITFIELD any_specifiers_p : 1; + /* True iff at least one type-specifier was found. */ + BOOL_BITFIELD any_type_specifiers_p : 1; + /* True iff "int" was explicitly provided. */ + BOOL_BITFIELD explicit_int_p : 1; + /* True iff "__intN" was explicitly provided. */ + BOOL_BITFIELD explicit_intN_p : 1; + /* True iff "char" was explicitly provided. */ + BOOL_BITFIELD explicit_char_p : 1; + /* True iff ds_thread is set for __thread, not thread_local. */ + BOOL_BITFIELD gnu_thread_keyword_p : 1; + /* True iff the type is a decltype. */ + BOOL_BITFIELD decltype_p : 1; + /* True iff the alternate "__intN__" form of the __intN type has been + used. */ + BOOL_BITFIELD int_n_alt: 1; +}; + +/* The various kinds of declarators. */ + +enum cp_declarator_kind { + cdk_id, + cdk_function, + cdk_array, + cdk_pointer, + cdk_reference, + cdk_ptrmem, + cdk_decomp, + cdk_error +}; + +/* A declarator. */ + +typedef struct cp_declarator cp_declarator; + +typedef struct cp_parameter_declarator cp_parameter_declarator; + +/* A parameter, before it has been semantically analyzed. */ +struct cp_parameter_declarator { + /* The next parameter, or NULL_TREE if none. */ + cp_parameter_declarator *next; + /* The decl-specifiers-seq for the parameter. */ + cp_decl_specifier_seq decl_specifiers; + /* The declarator for the parameter. */ + cp_declarator *declarator; + /* The default-argument expression, or NULL_TREE, if none. */ + tree default_argument; + /* True iff this is a template parameter pack. 
*/ + bool template_parameter_pack_p; + /* Location within source. */ + location_t loc; +}; + +/* A declarator. */ +struct cp_declarator { + /* The kind of declarator. */ + ENUM_BITFIELD (cp_declarator_kind) kind : 4; + /* Whether we parsed an ellipsis (`...') just before the declarator, + to indicate this is a parameter pack. */ + BOOL_BITFIELD parameter_pack_p : 1; + /* If this declarator is parenthesized, this the open-paren. It is + UNKNOWN_LOCATION when not parenthesized. */ + location_t parenthesized; + /* Currently only set for cdk_id, cdk_decomp and cdk_function. */ + location_t id_loc; + /* If this declarator is part of an init-declarator, the location of the + initializer. */ + location_t init_loc; + /* GNU Attributes that apply to this declarator. If the declarator + is a pointer or a reference, these attribute apply to the type + pointed to. */ + tree attributes; + /* Standard C++11 attributes that apply to this declarator. If the + declarator is a pointer or a reference, these attributes apply + to the pointer, rather than to the type pointed to. */ + tree std_attributes; + /* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator. + For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL. */ + cp_declarator *declarator; + union { + /* For identifiers. */ + struct { + /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or + *_TYPE) for this identifier. */ + tree qualifying_scope; + /* The unqualified name of the entity -- an IDENTIFIER_NODE, + BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */ + tree unqualified_name; + /* If this is the name of a function, what kind of special + function (if any). */ + special_function_kind sfk; + } id; + /* For functions. */ + struct { + /* The parameters to the function as a TREE_LIST of decl/default. */ + tree parameters; + /* The cv-qualifiers for the function. */ + cp_cv_quals qualifiers; + /* The virt-specifiers for the function. 
*/ + cp_virt_specifiers virt_specifiers; + /* The ref-qualifier for the function. */ + cp_ref_qualifier ref_qualifier; + /* The transaction-safety qualifier for the function. */ + tree tx_qualifier; + /* The exception-specification for the function. */ + tree exception_specification; + /* The late-specified return type, if any. */ + tree late_return_type; + /* The trailing requires-clause, if any. */ + tree requires_clause; + location_t parens_loc; + } function; + /* For arrays. */ + struct { + /* The bounds to the array. */ + tree bounds; + } array; + /* For cdk_pointer and cdk_ptrmem. */ + struct { + /* The cv-qualifiers for the pointer. */ + cp_cv_quals qualifiers; + /* For cdk_ptrmem, the class type containing the member. */ + tree class_type; + } pointer; + /* For cdk_reference */ + struct { + /* The cv-qualifiers for the reference. These qualifiers are + only used to diagnose ill-formed code. */ + cp_cv_quals qualifiers; + /* Whether this is an rvalue reference */ + bool rvalue_ref; + } reference; + } u; +}; + +/* A level of template instantiation. */ +struct GTY((chain_next ("%h.next"))) tinst_level { + /* The immediately deeper level in the chain. */ + struct tinst_level *next; + + /* The original node. TLDCL can be a DECL (for a function or static + data member), a TYPE (for a class), depending on what we were + asked to instantiate, or a TREE_LIST with the template as PURPOSE + and the template args as VALUE, if we are substituting for + overload resolution. In all these cases, TARGS is NULL. + However, to avoid creating TREE_LIST objects for substitutions if + we can help, we store PURPOSE and VALUE in TLDCL and TARGS, + respectively. So TLDCL stands for TREE_LIST or DECL (the + template is a DECL too), whereas TARGS stands for the template + arguments. */ + tree tldcl, targs; + + /* For modules we need to know (a) the modules on the path of + instantiation and (b) the transitive imports along that path. 
+ Note that these two bitmaps may be inherited from NEXT, if this + decl is in the same module as NEXT (or has no new information). */ + bitmap path; + bitmap visible; + + private: + /* Return TRUE iff the original node is a split list. */ + bool split_list_p () const { return targs; } + + /* Return TRUE iff the original node is a TREE_LIST object. */ + bool tree_list_p () const + { + return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST; + } + + /* Return TRUE iff the original node is not a list, split or not. */ + bool not_list_p () const + { + return !split_list_p () && !tree_list_p (); + } + + /* Convert (in place) the original node from a split list to a + TREE_LIST. */ + tree to_list (); + + public: + /* Release storage for OBJ and node, if it's a TREE_LIST. */ + static void free (tinst_level *obj); + + /* Return TRUE iff the original node is a list, split or not. */ + bool list_p () const { return !not_list_p (); } + + /* Return the original node; if it's a split list, make it a + TREE_LIST first, so that it can be returned as a single tree + object. */ + tree get_node () { + if (!split_list_p ()) return tldcl; + else return to_list (); + } + + /* Return the original node if it's a DECL or a TREE_LIST, but do + NOT convert a split list to a TREE_LIST: return NULL instead. */ + tree maybe_get_node () const { + if (!split_list_p ()) return tldcl; + else return NULL_TREE; + } + + /* The location where the template is instantiated. */ + location_t locus; + + /* errorcount + sorrycount when we pushed this level. */ + unsigned short errors; + + /* Count references to this object. If refcount reaches + refcount_infinity value, we don't increment or decrement the + refcount anymore, as the refcount isn't accurate anymore. + The object can be still garbage collected if unreferenced from + anywhere, which might keep referenced objects referenced longer than + otherwise necessary. Hitting the infinity is rare though. 
*/ + unsigned short refcount; + + /* Infinity value for the above refcount. */ + static const unsigned short refcount_infinity = (unsigned short) ~0; +}; + +/* BUILT_IN_FRONTEND function codes. */ +enum cp_built_in_function { + CP_BUILT_IN_IS_CONSTANT_EVALUATED, + CP_BUILT_IN_INTEGER_PACK, + CP_BUILT_IN_IS_CORRESPONDING_MEMBER, + CP_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS, + CP_BUILT_IN_SOURCE_LOCATION, + CP_BUILT_IN_LAST +}; + +bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec); + +/* Return the type of the `this' parameter of FNTYPE. */ + +inline tree +type_of_this_parm (const_tree fntype) +{ + function_args_iterator iter; + gcc_assert (TREE_CODE (fntype) == METHOD_TYPE); + function_args_iter_init (&iter, fntype); + return function_args_iter_cond (&iter); +} + +/* Return the class of the `this' parameter of FNTYPE. */ + +inline tree +class_of_this_parm (const_tree fntype) +{ + return TREE_TYPE (type_of_this_parm (fntype)); +} + +/* A parameter list indicating for a function with no parameters, + e.g "int f(void)". */ +extern cp_parameter_declarator *no_parameters; + +/* Various dump ids. 
*/ +extern int class_dump_id; +extern int module_dump_id; +extern int raw_dump_id; + +/* in call.cc */ +extern bool check_dtor_name (tree, tree); +int magic_varargs_p (tree); + +extern tree build_conditional_expr (const op_location_t &, + tree, tree, tree, + tsubst_flags_t); +extern tree build_addr_func (tree, tsubst_flags_t); +extern void set_flags_from_callee (tree); +extern tree build_call_a (tree, int, tree*); +extern tree build_call_n (tree, int, ...); +extern bool null_ptr_cst_p (tree); +extern bool null_member_pointer_value_p (tree); +extern bool sufficient_parms_p (const_tree); +extern tree type_decays_to (tree); +extern tree extract_call_expr (tree); +extern tree build_trivial_dtor_call (tree, bool = false); +extern bool ref_conv_binds_directly_p (tree, tree); +extern tree build_user_type_conversion (tree, tree, int, + tsubst_flags_t); +extern tree build_new_function_call (tree, vec **, + tsubst_flags_t); +extern tree build_operator_new_call (tree, vec **, + tree *, tree *, tree, tree, + tree *, tsubst_flags_t); +extern tree build_new_method_call (tree, tree, + vec **, tree, + int, tree *, tsubst_flags_t); +extern tree build_special_member_call (tree, tree, + vec **, + tree, int, tsubst_flags_t); +extern tree build_new_op (const op_location_t &, + enum tree_code, + int, tree, tree, tree, tree, + tree *, tsubst_flags_t); +/* Wrapper that leaves out the usually-null op3 and overload parms. 
*/ +inline tree build_new_op (const op_location_t &loc, enum tree_code code, + int flags, tree arg1, tree arg2, + tsubst_flags_t complain) +{ + return build_new_op (loc, code, flags, arg1, arg2, NULL_TREE, NULL_TREE, + NULL, complain); +} +extern tree build_op_call (tree, vec **, + tsubst_flags_t); +extern tree build_op_subscript (const op_location_t &, tree, + vec **, tree *, + tsubst_flags_t); +extern bool aligned_allocation_fn_p (tree); +extern tree destroying_delete_p (tree); +extern bool usual_deallocation_fn_p (tree); +extern tree build_op_delete_call (enum tree_code, tree, tree, + bool, tree, tree, + tsubst_flags_t); +extern bool can_convert (tree, tree, tsubst_flags_t); +extern bool can_convert_standard (tree, tree, tsubst_flags_t); +extern bool can_convert_arg (tree, tree, tree, int, + tsubst_flags_t); +extern bool can_convert_arg_bad (tree, tree, tree, int, + tsubst_flags_t); +extern int conv_flags (int, int, tree, tree, int); +extern struct conversion * good_conversion (tree, tree, tree, int, tsubst_flags_t); +extern location_t get_fndecl_argument_location (tree, int); +extern void complain_about_bad_argument (location_t arg_loc, + tree from_type, tree to_type, + tree fndecl, int parmnum); +extern void maybe_inform_about_fndecl_for_bogus_argument_init (tree, int); +extern tree perform_dguide_overload_resolution (tree, const vec *, + tsubst_flags_t); + + +/* A class for recording information about access failures (e.g. private + fields), so that we can potentially supply a fix-it hint about + an accessor (from a context in which the constness of the object + is known). 
*/ + +class access_failure_info +{ + public: + access_failure_info () : m_was_inaccessible (false), + m_basetype_path (NULL_TREE), + m_decl (NULL_TREE), m_diag_decl (NULL_TREE) {} + + void record_access_failure (tree basetype_path, tree decl, tree diag_decl); + + bool was_inaccessible_p () const { return m_was_inaccessible; } + tree get_decl () const { return m_decl; } + tree get_diag_decl () const { return m_diag_decl; } + tree get_any_accessor (bool const_p) const; + void maybe_suggest_accessor (bool const_p) const; + static void add_fixit_hint (rich_location *richloc, tree accessor); + + private: + bool m_was_inaccessible; + tree m_basetype_path; + tree m_decl; + tree m_diag_decl; +}; + +extern void complain_about_access (tree, tree, tree, bool, + access_kind); +extern void push_defarg_context (tree); +extern void pop_defarg_context (void); +extern tree convert_default_arg (tree, tree, tree, int, + tsubst_flags_t); +extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t); +extern tree build_x_va_arg (location_t, tree, tree); +extern tree cxx_type_promotes_to (tree); +extern tree type_passed_as (tree); +extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t); +extern bool is_properly_derived_from (tree, tree); +extern tree initialize_reference (tree, tree, int, + tsubst_flags_t); +extern tree extend_ref_init_temps (tree, tree, + vec**, + tree * = NULL); +extern tree make_temporary_var_for_ref_to_temp (tree, tree); +extern bool type_has_extended_temps (tree); +extern tree strip_top_quals (tree); +extern bool reference_related_p (tree, tree); +extern bool reference_compatible_p (tree, tree); +extern int remaining_arguments (tree); +extern tree build_implicit_conv_flags (tree, tree, int); +extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t); +extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int); +extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t); +extern tree 
build_converted_constant_bool_expr (tree, tsubst_flags_t); +extern tree perform_direct_initialization_if_possible (tree, tree, bool, + tsubst_flags_t); +extern vec *resolve_args (vec*, tsubst_flags_t); +extern tree in_charge_arg_for_name (tree); +extern bool in_immediate_context (); +extern tree build_cxx_call (tree, int, tree *, + tsubst_flags_t, + tree = NULL_TREE); +extern bool is_std_init_list (tree); +extern bool is_list_ctor (tree); +extern void validate_conversion_obstack (void); +extern void mark_versions_used (tree); +extern int unsafe_return_slot_p (tree); +extern bool make_safe_copy_elision (tree, tree); +extern bool cp_handle_deprecated_or_unavailable (tree, tsubst_flags_t = tf_warning_or_error); +extern void cp_warn_deprecated_use_scopes (tree); +extern tree get_function_version_dispatcher (tree); + +/* in class.cc */ +extern tree build_vfield_ref (tree, tree); +extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node); +extern tree build_base_path (enum tree_code, tree, + tree, int, tsubst_flags_t); +extern tree convert_to_base (tree, tree, bool, bool, + tsubst_flags_t); +extern tree convert_to_base_statically (tree, tree); +extern bool is_empty_base_ref (tree); +extern tree build_vtbl_ref (tree, tree); +extern tree build_vfn_ref (tree, tree); +extern tree get_vtable_decl (tree, int); +extern bool add_method (tree, tree, bool); +extern tree declared_access (tree); +extern bool maybe_push_used_methods (tree); +extern tree currently_open_class (tree); +extern tree currently_open_derived_class (tree); +extern tree outermost_open_class (void); +extern tree current_nonlambda_class_type (void); +extern tree finish_struct (tree, tree); +extern void finish_struct_1 (tree); +extern int resolves_to_fixed_type_p (tree, int * = NULL); +extern void init_class_processing (void); +extern int is_empty_class (tree); +extern bool is_really_empty_class (tree, bool); +extern void pushclass (tree); +extern void popclass (void); +extern void 
push_nested_class (tree); +extern void pop_nested_class (void); +extern int current_lang_depth (void); +extern void push_lang_context (tree); +extern void pop_lang_context (void); +extern tree instantiate_type (tree, tree, tsubst_flags_t); +extern void build_self_reference (void); +extern int same_signature_p (const_tree, const_tree); +extern tree lookup_vfn_in_binfo (tree, tree); +extern void maybe_add_class_template_decl_list (tree, tree, int); +extern void unreverse_member_declarations (tree); +extern bool is_empty_field (tree); +extern void invalidate_class_lookup_cache (void); +extern void maybe_note_name_used_in_class (tree, tree); +extern void note_name_declared_in_class (tree, tree); +extern tree get_vtbl_decl_for_binfo (tree); +extern bool vptr_via_virtual_p (tree); +extern void debug_class (tree); +extern void debug_thunks (tree); +extern void set_linkage_according_to_type (tree, tree); +extern void determine_key_method (tree); +extern void check_for_override (tree, tree); +extern void push_class_stack (void); +extern void pop_class_stack (void); +extern bool default_ctor_p (const_tree); +extern bool type_has_user_nondefault_constructor (tree); +extern tree in_class_defaulted_default_constructor (tree); +extern bool user_provided_p (tree); +extern bool type_has_user_provided_constructor (tree); +extern bool type_has_non_user_provided_default_constructor (tree); +extern bool vbase_has_user_provided_move_assign (tree); +extern tree default_init_uninitialized_part (tree); +extern bool trivial_default_constructor_is_constexpr (tree); +extern bool type_has_constexpr_default_constructor (tree); +extern bool type_has_constexpr_destructor (tree); +extern bool type_has_virtual_destructor (tree); +extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared); +extern bool classtype_has_non_deleted_move_ctor (tree); +extern tree classtype_has_depr_implicit_copy (tree); +extern bool classtype_has_op (tree, tree_code); +extern tree 
classtype_has_defaulted_op (tree, tree_code); +extern bool type_build_ctor_call (tree); +extern bool type_build_dtor_call (tree); +extern void explain_non_literal_class (tree); +extern void inherit_targ_abi_tags (tree); +extern void defaulted_late_check (tree); +extern bool defaultable_fn_check (tree); +extern void check_abi_tags (tree); +extern tree missing_abi_tags (tree); +extern void fixup_type_variants (tree); +extern void fixup_attribute_variants (tree); +extern void build_cdtor_clones (tree, bool, bool, bool); +extern void clone_cdtor (tree, bool); +extern tree copy_operator_fn (tree, tree_code code); +extern void adjust_clone_args (tree); +extern void deduce_noexcept_on_destructor (tree); +extern bool uniquely_derived_from_p (tree, tree); +extern bool publicly_uniquely_derived_p (tree, tree); +extern tree common_enclosing_class (tree, tree); + +/* in cvt.cc */ +extern tree convert_to_reference (tree, tree, int, int, tree, + tsubst_flags_t); +extern tree convert_from_reference (tree); +extern tree force_rvalue (tree, tsubst_flags_t); +extern tree ocp_convert (tree, tree, int, int, + tsubst_flags_t); +extern tree cp_convert (tree, tree, tsubst_flags_t); +extern tree cp_convert_and_check (tree, tree, tsubst_flags_t); +extern tree cp_fold_convert (tree, tree); +extern tree cp_get_callee (tree); +extern tree cp_get_callee_fndecl (tree); +extern tree cp_get_callee_fndecl_nofold (tree); +extern tree cp_get_fndecl_from_callee (tree, bool fold = true); +extern tree convert_to_void (tree, impl_conv_void, + tsubst_flags_t); +extern tree convert_force (tree, tree, int, + tsubst_flags_t); +extern tree build_expr_type_conversion (int, tree, bool); +extern tree type_promotes_to (tree); +extern bool can_convert_qual (tree, tree); +extern tree perform_qualification_conversions (tree, tree); +extern bool tx_safe_fn_type_p (tree); +extern tree tx_unsafe_fn_variant (tree); +extern bool fnptr_conv_p (tree, tree); +extern tree strip_fnptr_conv (tree); + +/* in name-lookup.cc */ 
+extern void maybe_push_cleanup_level (tree); +extern tree maybe_push_decl (tree); +extern tree current_decl_namespace (void); + +/* decl.cc */ +extern tree poplevel (int, int, int); +extern void cxx_init_decl_processing (void); +enum cp_tree_node_structure_enum cp_tree_node_structure + (union lang_tree_node *); +extern void finish_scope (void); +extern void push_switch (tree); +extern void pop_switch (void); +extern void note_break_stmt (void); +extern bool note_iteration_stmt_body_start (void); +extern void note_iteration_stmt_body_end (bool); +extern void determine_local_discriminator (tree); +extern int decls_match (tree, tree, bool = true); +extern bool maybe_version_functions (tree, tree, bool); +extern bool merge_default_template_args (tree, tree, bool); +extern tree duplicate_decls (tree, tree, + bool hiding = false, + bool was_hidden = false); +extern tree declare_local_label (tree); +extern tree define_label (location_t, tree); +extern void check_goto (tree); +extern bool check_omp_return (void); +extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t); +extern tree build_typename_type (tree, tree, tree, tag_types); +extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); +extern tree make_unbound_class_template_raw (tree, tree, tree); +extern unsigned push_abi_namespace (tree node = abi_node); +extern void pop_abi_namespace (unsigned flags, + tree node = abi_node); +extern tree build_library_fn_ptr (const char *, tree, int); +extern tree build_cp_library_fn_ptr (const char *, tree, int); +extern tree push_library_fn (tree, tree, tree, int); +extern tree push_throw_library_fn (tree, tree); +extern void warn_misplaced_attr_for_class_type (location_t location, + tree class_type); +extern tree check_tag_decl (cp_decl_specifier_seq *, bool); +extern tree shadow_tag (cp_decl_specifier_seq *); +extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool); +extern tree start_decl (const cp_declarator *, 
cp_decl_specifier_seq *, int, tree, tree, tree *); +extern void start_decl_1 (tree, bool); +extern bool check_array_initializer (tree, tree, tree); +extern void omp_declare_variant_finalize (tree, tree); +extern void cp_finish_decl (tree, tree, bool, tree, int); +extern tree lookup_decomp_type (tree); +extern void cp_maybe_mangle_decomp (tree, tree, unsigned int); +extern void cp_finish_decomp (tree, tree, unsigned int); +extern int cp_complete_array_type (tree *, tree, bool); +extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t); +extern tree build_ptrmemfunc_type (tree); +extern tree build_ptrmem_type (tree, tree); +/* the grokdeclarator prototype is in decl.h */ +extern tree build_this_parm (tree, tree, cp_cv_quals); +extern tree grokparms (tree, tree *); +extern int copy_fn_p (const_tree); +extern bool move_fn_p (const_tree); +extern bool move_signature_fn_p (const_tree); +extern tree get_scope_of_declarator (const cp_declarator *); +extern void grok_special_member_properties (tree); +extern bool grok_ctor_properties (const_tree, const_tree); +extern bool grok_op_properties (tree, bool); +extern tree xref_tag (tag_types, tree, + TAG_how = TAG_how::CURRENT_ONLY, + bool tpl_header_p = false); +extern void xref_basetypes (tree, tree); +extern tree start_enum (tree, tree, tree, tree, bool, bool *); +extern void finish_enum_value_list (tree); +extern void finish_enum (tree); +extern tree build_enumerator (tree, tree, tree, tree, location_t); +extern tree lookup_enumerator (tree, tree); +extern bool start_preparsed_function (tree, tree, int); +extern bool start_function (cp_decl_specifier_seq *, + const cp_declarator *, tree); +extern tree begin_function_body (void); +extern void finish_function_body (tree); +extern tree outer_curly_brace_block (tree); +extern tree finish_function (bool); +extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); +extern void maybe_register_incomplete_var (tree); +extern void 
maybe_commonize_var (tree); +extern void complete_vars (tree); +extern tree static_fn_type (tree); +extern void revert_static_member_fn (tree); +extern void fixup_anonymous_aggr (tree); +extern tree compute_array_index_type (tree, tree, tsubst_flags_t); +extern tree check_default_argument (tree, tree, tsubst_flags_t); +extern int wrapup_namespace_globals (); +extern tree create_implicit_typedef (tree, tree); +extern int local_variable_p (const_tree); +extern tree register_dtor_fn (tree); +extern tmpl_spec_kind current_tmpl_spec_kind (int); +extern tree cxx_builtin_function (tree decl); +extern tree cxx_builtin_function_ext_scope (tree decl); +extern tree cxx_simulate_builtin_function_decl (tree); +extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); +extern void warn_extern_redeclared_static (tree, tree); +extern tree cxx_comdat_group (tree); +extern bool cp_missing_noreturn_ok_p (tree); +extern bool is_direct_enum_init (tree, tree); +extern void initialize_artificial_var (tree, vec *); +extern tree check_var_type (tree, tree, location_t); +extern tree reshape_init (tree, tree, tsubst_flags_t); +extern tree next_initializable_field (tree); +extern tree next_subobject_field (tree); +extern tree first_field (const_tree); +extern tree fndecl_declared_return_type (tree); +extern bool undeduced_auto_decl (tree); +extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error); + +extern tree finish_case_label (location_t, tree, tree); +extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t); +extern bool check_array_designated_initializer (constructor_elt *, + unsigned HOST_WIDE_INT); +extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t); +extern tree build_explicit_specifier (tree, tsubst_flags_t); +extern void do_push_parm_decls (tree, tree, tree *); +extern tree do_aggregate_paren_init (tree, tree); + +/* in decl2.cc */ +extern void record_mangling (tree, bool); +extern void overwrite_mangling (tree, tree); 
+extern void note_mangling_alias (tree, tree); +extern void generate_mangling_aliases (void); +extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier); +extern tree build_pointer_ptrmemfn_type (tree); +extern tree change_return_type (tree, tree); +extern void maybe_retrofit_in_chrg (tree); +extern void maybe_make_one_only (tree); +extern bool vague_linkage_p (tree); +extern void grokclassfn (tree, tree, + enum overload_flags); +extern tree grok_array_decl (location_t, tree, tree, + vec **, tsubst_flags_t); +extern tree delete_sanity (location_t, tree, tree, bool, + int, tsubst_flags_t); +extern tree check_classfn (tree, tree, tree); +extern void check_member_template (tree); +extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, + tree, bool, tree, tree); +extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, + tree, tree, tree); +extern tree splice_template_attributes (tree *, tree); +extern bool any_dependent_type_attributes_p (tree); +extern tree cp_reconstruct_complex_type (tree, tree); +extern bool attributes_naming_typedef_ok (tree); +extern void cplus_decl_attributes (tree *, tree, int); +extern void finish_anon_union (tree); +extern void cxx_post_compilation_parsing_cleanups (void); +extern tree coerce_new_type (tree, location_t); +extern void coerce_delete_type (tree, location_t); +extern void comdat_linkage (tree); +extern void determine_visibility (tree); +extern void constrain_class_visibility (tree); +extern void reset_type_linkage (tree); +extern void tentative_decl_linkage (tree); +extern void import_export_decl (tree); +extern tree build_cleanup (tree); +extern tree build_offset_ref_call_from_tree (tree, vec **, + tsubst_flags_t); +extern bool decl_defined_p (tree); +extern bool decl_constant_var_p (tree); +extern bool decl_maybe_constant_var_p (tree); +extern void no_linkage_error (tree); +extern void check_default_args (tree); +extern bool mark_used (tree); +extern bool mark_used (tree, 
tsubst_flags_t); +extern bool mark_single_function (tree, tsubst_flags_t); +extern void finish_static_data_member_decl (tree, tree, bool, tree, int); +extern tree cp_build_parm_decl (tree, tree, tree); +extern void copy_linkage (tree, tree); +extern tree get_guard (tree); +extern tree get_guard_cond (tree, bool); +extern tree set_guard (tree); +extern tree maybe_get_tls_wrapper_call (tree); +extern void mark_needed (tree); +extern bool decl_needed_p (tree); +extern void note_vague_linkage_fn (tree); +extern void note_variable_template_instantiation (tree); +extern tree build_artificial_parm (tree, tree, tree); +extern bool possibly_inlined_p (tree); +extern int parm_index (tree); +extern tree vtv_start_verification_constructor_init_function (void); +extern tree vtv_finish_verification_constructor_init_function (tree); +extern bool cp_omp_mappable_type (tree); +extern bool cp_omp_emit_unmappable_type_notes (tree); +extern void cp_check_const_attributes (tree); + +/* in error.cc */ +extern const char *type_as_string (tree, int); +extern const char *type_as_string_translate (tree, int); +extern const char *decl_as_string (tree, int); +extern const char *decl_as_string_translate (tree, int); +extern const char *decl_as_dwarf_string (tree, int); +extern const char *expr_as_string (tree, int); +extern const char *expr_to_string (tree); +extern const char *lang_decl_name (tree, int, bool); +extern const char *lang_decl_dwarf_name (tree, int, bool); +extern const char *language_to_string (enum languages); +extern const char *class_key_or_enum_as_string (tree); +extern void maybe_warn_variadic_templates (void); +extern void maybe_warn_cpp0x (cpp0x_warn_str str, + location_t = input_location); +extern bool pedwarn_cxx98 (location_t, int, const char *, ...) 
ATTRIBUTE_GCC_DIAG(3,4); +extern location_t location_of (tree); +extern void qualified_name_lookup_error (tree, tree, tree, + location_t); + +/* in except.cc */ +extern void init_exception_processing (void); +extern tree expand_start_catch_block (tree); +extern void expand_end_catch_block (void); +extern tree build_exc_ptr (void); +extern tree build_throw (location_t, tree); +extern int nothrow_libfn_p (const_tree); +extern void check_handlers (tree); +extern tree finish_noexcept_expr (tree, tsubst_flags_t); +extern bool expr_noexcept_p (tree, tsubst_flags_t); +extern void perform_deferred_noexcept_checks (void); +extern bool nothrow_spec_p (const_tree); +extern bool type_noexcept_p (const_tree); +extern bool type_throw_all_p (const_tree); +extern tree build_noexcept_spec (tree, tsubst_flags_t); +extern void choose_personality_routine (enum languages); +extern tree build_must_not_throw_expr (tree,tree); +extern tree eh_type_info (tree); +extern tree begin_eh_spec_block (void); +extern void finish_eh_spec_block (tree, tree); +extern tree build_eh_type_type (tree); +extern tree cp_protect_cleanup_actions (void); +extern void maybe_splice_retval_cleanup (tree); +extern tree maybe_set_retval_sentinel (void); + +extern tree template_parms_to_args (tree); +extern tree template_parms_level_to_args (tree); +extern tree generic_targs_for (tree); + +/* in expr.cc */ +extern tree cplus_expand_constant (tree); +extern tree mark_use (tree expr, bool rvalue_p, bool read_p, + location_t = UNKNOWN_LOCATION, + bool reject_builtin = true); +extern tree mark_rvalue_use (tree, + location_t = UNKNOWN_LOCATION, + bool reject_builtin = true); +extern tree mark_lvalue_use (tree); +extern tree mark_lvalue_use_nonread (tree); +extern tree mark_type_use (tree); +extern tree mark_discarded_use (tree); +extern void mark_exp_read (tree); + +/* friend.cc */ +extern int is_friend (tree, tree); +extern void make_friend_class (tree, tree, bool); +extern void add_friend (tree, tree, bool); +extern 
tree do_friend (tree, tree, tree, + enum overload_flags, bool); + +extern void set_global_friend (tree); +extern bool is_global_friend (tree); + +/* in init.cc */ +extern tree expand_member_init (tree); +extern void emit_mem_initializers (tree); +extern tree build_aggr_init (tree, tree, int, + tsubst_flags_t); +extern int is_class_type (tree, int); +extern bool is_copy_initialization (tree); +extern tree build_zero_init (tree, tree, bool); +extern tree build_value_init (tree, tsubst_flags_t); +extern tree build_value_init_noctor (tree, tsubst_flags_t); +extern tree get_nsdmi (tree, bool, tsubst_flags_t); +extern tree build_offset_ref (tree, tree, bool, + tsubst_flags_t); +extern tree throw_bad_array_new_length (void); +extern bool type_has_new_extended_alignment (tree); +extern unsigned malloc_alignment (void); +extern tree build_new_constexpr_heap_type (tree, tree, tree); +extern tree build_new (location_t, + vec **, tree, + tree, vec **, + int, tsubst_flags_t); +extern tree get_temp_regvar (tree, tree); +extern tree build_vec_init (tree, tree, tree, bool, int, + tsubst_flags_t, + vec ** = nullptr); +extern tree build_delete (location_t, tree, tree, + special_function_kind, + int, int, tsubst_flags_t); +extern void push_base_cleanups (void); +extern tree build_vec_delete (location_t, tree, tree, + special_function_kind, int, + tsubst_flags_t); +extern tree create_temporary_var (tree); +extern void initialize_vtbl_ptrs (tree); +extern tree scalar_constant_value (tree); +extern tree decl_constant_value (tree, bool); +extern tree decl_really_constant_value (tree, bool = true); +extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); +extern tree build_vtbl_address (tree); +extern bool maybe_reject_flexarray_init (tree, tree); + +/* in lex.cc */ +extern void cxx_dup_lang_specific_decl (tree); +extern tree unqualified_name_lookup_error (tree, + location_t = UNKNOWN_LOCATION); +extern tree unqualified_fn_lookup_error (cp_expr); +extern tree 
make_conv_op_name (tree); +extern tree build_lang_decl (enum tree_code, tree, tree); +extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree); +extern bool maybe_add_lang_decl_raw (tree, bool decomp_p); +extern bool maybe_add_lang_type_raw (tree); +extern void retrofit_lang_decl (tree); +extern void fit_decomposition_lang_decl (tree, tree); +extern tree copy_decl (tree CXX_MEM_STAT_INFO); +extern tree copy_type (tree CXX_MEM_STAT_INFO); +extern tree cxx_make_type (enum tree_code CXX_MEM_STAT_INFO); +extern tree make_class_type (enum tree_code CXX_MEM_STAT_INFO); +extern const char *get_identifier_kind_name (tree); +extern void set_identifier_kind (tree, cp_identifier_kind); +extern bool cxx_init (void); +extern void cxx_finish (void); +extern bool in_main_input_context (void); +extern uintptr_t module_token_pre (cpp_reader *, const cpp_token *, uintptr_t); +extern uintptr_t module_token_cdtor (cpp_reader *, uintptr_t); +extern uintptr_t module_token_lang (int type, int keyword, tree value, + location_t, uintptr_t); + +/* in method.cc */ +extern void init_method (void); +extern tree make_thunk (tree, bool, tree, tree); +extern void finish_thunk (tree); +extern void use_thunk (tree, bool); +extern bool trivial_fn_p (tree); +extern tree forward_parm (tree); +extern bool is_trivially_xible (enum tree_code, tree, tree); +extern bool is_nothrow_xible (enum tree_code, tree, tree); +extern bool is_xible (enum tree_code, tree, tree); +extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error); +extern bool maybe_explain_implicit_delete (tree); +extern void explain_implicit_non_constexpr (tree); +extern bool deduce_inheriting_ctor (tree); +extern bool decl_remember_implicit_trigger_p (tree); +extern void synthesize_method (tree); +extern void maybe_synthesize_method (tree); +extern tree lazily_declare_fn (special_function_kind, + tree); +extern tree skip_artificial_parms_for (const_tree, tree); +extern int num_artificial_parms_for 
(const_tree); +extern tree make_alias_for (tree, tree); +extern tree get_copy_ctor (tree, tsubst_flags_t); +extern tree get_copy_assign (tree); +extern tree get_default_ctor (tree); +extern tree get_dtor (tree, tsubst_flags_t); +extern tree build_stub_object (tree); +extern tree strip_inheriting_ctors (tree); +extern tree inherited_ctor_binfo (tree); +extern bool base_ctor_omit_inherited_parms (tree); +extern bool ctor_omit_inherited_parms (tree); +extern tree locate_ctor (tree); +extern tree implicitly_declare_fn (special_function_kind, tree, + bool, tree, tree); +/* In module.cc */ +class module_state; /* Forward declare. */ +inline bool modules_p () { return flag_modules != 0; } + +/* The kind of module or part thereof that we're in. */ +enum module_kind_bits +{ + MK_MODULE = 1 << 0, /* This TU is a module. */ + MK_GLOBAL = 1 << 1, /* Entities are in the global module. */ + MK_INTERFACE = 1 << 2, /* This TU is an interface. */ + MK_PARTITION = 1 << 3, /* This TU is a partition. */ + MK_EXPORTING = 1 << 4, /* We are in an export region. */ +}; + +/* We do lots of bit-manipulation, so an unsigned is easier. */ +extern unsigned module_kind; + +/* MK_MODULE & MK_GLOBAL have the following combined meanings: + MODULE GLOBAL + 0 0 not a module + 0 1 GMF of named module (we've not yet seen module-decl) + 1 0 purview of named module + 1 1 header unit. */ + +inline bool module_purview_p () +{ return module_kind & MK_MODULE; } +inline bool global_purview_p () +{ return module_kind & MK_GLOBAL; } + +inline bool not_module_p () +{ return (module_kind & (MK_MODULE | MK_GLOBAL)) == 0; } +inline bool named_module_p () +{ /* This is a named module if exactly one of MODULE and GLOBAL is + set. */ + /* The divides are constant shifts! 
*/ + return ((module_kind / MK_MODULE) ^ (module_kind / MK_GLOBAL)) & 1; +} +inline bool header_module_p () +{ return (module_kind & (MK_MODULE | MK_GLOBAL)) == (MK_MODULE | MK_GLOBAL); } +inline bool named_module_purview_p () +{ return (module_kind & (MK_MODULE | MK_GLOBAL)) == MK_MODULE; } +inline bool module_interface_p () +{ return module_kind & MK_INTERFACE; } +inline bool module_partition_p () +{ return module_kind & MK_PARTITION; } +inline bool module_has_cmi_p () +{ return module_kind & (MK_INTERFACE | MK_PARTITION); } + +/* We're currently exporting declarations. */ +inline bool module_exporting_p () +{ return module_kind & MK_EXPORTING; } + +extern module_state *get_module (tree name, module_state *parent = NULL, + bool partition = false); +extern bool module_may_redeclare (tree decl); + +extern int module_initializer_kind (); +extern void module_add_import_initializers (); + +/* Where the namespace-scope decl was originally declared. */ +extern void set_originating_module (tree, bool friend_p = false); +extern tree get_originating_module_decl (tree) ATTRIBUTE_PURE; +extern int get_originating_module (tree, bool for_mangle = false) ATTRIBUTE_PURE; +extern unsigned get_importing_module (tree, bool = false) ATTRIBUTE_PURE; + +/* Where current instance of the decl got declared/defined/instantiated. 
*/ +extern void set_instantiating_module (tree); +extern void set_defining_module (tree); +extern void maybe_attach_decl (tree ctx, tree decl); + +extern void mangle_module (int m, bool include_partition); +extern void mangle_module_fini (); +extern void lazy_load_binding (unsigned mod, tree ns, tree id, + binding_slot *bslot); +extern void lazy_load_pendings (tree decl); +extern module_state *preprocess_module (module_state *, location_t, + bool in_purview, + bool is_import, bool export_p, + cpp_reader *reader); +extern void preprocessed_module (cpp_reader *reader); +extern void import_module (module_state *, location_t, bool export_p, + tree attr, cpp_reader *); +extern void declare_module (module_state *, location_t, bool export_p, + tree attr, cpp_reader *); +extern void init_modules (cpp_reader *); +extern void fini_modules (); +extern void maybe_check_all_macros (cpp_reader *); +extern void finish_module_processing (cpp_reader *); +extern char const *module_name (unsigned, bool header_ok); +extern bitmap get_import_bitmap (); +extern bitmap visible_instantiation_path (bitmap *); +extern void module_begin_main_file (cpp_reader *, line_maps *, + const line_map_ordinary *); +extern void module_preprocess_options (cpp_reader *); +extern bool handle_module_option (unsigned opt, const char *arg, int value); + +/* In optimize.cc */ +extern bool maybe_clone_body (tree); + +/* In parser.cc */ +extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool, + unsigned short); +extern void cp_convert_omp_range_for (tree &, vec *, tree &, + tree &, tree &, tree &, tree &, tree &); +extern void cp_finish_omp_range_for (tree, tree); +extern bool parsing_nsdmi (void); +extern bool parsing_function_declarator (); +extern bool parsing_default_capturing_generic_lambda_in_template (void); +extern void inject_this_parameter (tree, cp_cv_quals); +extern location_t defparse_location (tree); +extern void maybe_show_extern_c_location (void); +extern bool 
literal_integer_zerop (const_tree); + +/* in pt.cc */ +extern tree canonical_type_parameter (tree); +extern void push_access_scope (tree); +extern void pop_access_scope (tree); +extern bool check_template_shadow (tree); +extern bool check_auto_in_tmpl_args (tree, tree); +extern tree get_innermost_template_args (tree, int); +extern void maybe_begin_member_template_processing (tree); +extern void maybe_end_member_template_processing (void); +extern tree finish_member_template_decl (tree); +extern void begin_template_parm_list (void); +extern bool begin_specialization (void); +extern void reset_specialization (void); +extern void end_specialization (void); +extern void begin_explicit_instantiation (void); +extern void end_explicit_instantiation (void); +extern void check_unqualified_spec_or_inst (tree, location_t); +extern tree check_explicit_specialization (tree, tree, int, int, + tree = NULL_TREE); +extern int num_template_headers_for_class (tree); +extern void check_template_variable (tree); +extern tree make_auto (void); +extern tree make_decltype_auto (void); +extern tree make_constrained_auto (tree, tree); +extern tree make_constrained_decltype_auto (tree, tree); +extern tree make_template_placeholder (tree); +extern bool template_placeholder_p (tree); +extern bool ctad_template_p (tree); +extern tree do_auto_deduction (tree, tree, tree, + tsubst_flags_t + = tf_warning_or_error, + auto_deduction_context + = adc_unspecified, + tree = NULL_TREE, + int = LOOKUP_NORMAL); +extern tree type_uses_auto (tree); +extern tree type_uses_auto_or_concept (tree); +extern void append_type_to_template_for_access_check (tree, tree, tree, + location_t); +extern tree convert_generic_types_to_packs (tree, int, int); +extern tree splice_late_return_type (tree, tree); +extern bool is_auto (const_tree); +extern tree process_template_parm (tree, location_t, tree, + bool, bool); +extern tree end_template_parm_list (tree); +extern void end_template_parm_list (void); +extern void 
end_template_decl (void); +extern tree maybe_update_decl_type (tree, tree); +extern bool check_default_tmpl_args (tree, tree, bool, bool, int); +extern tree push_template_decl (tree, bool is_friend = false); +extern tree add_inherited_template_parms (tree, tree); +extern void template_parm_level_and_index (tree, int*, int*); +extern bool redeclare_class_template (tree, tree, tree); +extern tree lookup_template_class (tree, tree, tree, tree, + int, tsubst_flags_t); +extern tree lookup_template_function (tree, tree); +extern tree lookup_template_variable (tree, tree); +extern int uses_template_parms (tree); +extern bool uses_template_parms_level (tree, int); +extern bool uses_outer_template_parms_in_constraints (tree); +extern bool in_template_function (void); +extern bool need_generic_capture (void); +extern tree instantiate_class_template (tree); +extern tree instantiate_template (tree, tree, tsubst_flags_t); +extern tree fn_type_unification (tree, tree, tree, + const tree *, unsigned int, + tree, unification_kind_t, int, + struct conversion **, + bool, bool); +extern void mark_decl_instantiated (tree, int); +extern int more_specialized_fn (tree, tree, int); +extern void do_decl_instantiation (tree, tree); +extern void do_type_instantiation (tree, tree, tsubst_flags_t); +extern bool always_instantiate_p (tree); +extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error); +extern tree instantiate_decl (tree, bool, bool); +extern void maybe_instantiate_decl (tree); +extern int comp_template_parms (const_tree, const_tree); +extern bool template_heads_equivalent_p (const_tree, const_tree); +extern bool builtin_pack_fn_p (tree); +extern tree uses_parameter_packs (tree); +extern bool template_parameter_pack_p (const_tree); +extern bool function_parameter_pack_p (const_tree); +extern bool function_parameter_expanded_from_pack_p (tree, tree); +extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error); +extern bool 
check_for_bare_parameter_packs (tree, location_t = UNKNOWN_LOCATION); +extern tree build_template_info (tree, tree); +extern tree get_template_info (const_tree); +extern int template_class_depth (tree); +extern int is_specialization_of (tree, tree); +extern bool is_specialization_of_friend (tree, tree); +extern int comp_template_args (tree, tree, tree * = NULL, + tree * = NULL, bool = false); +extern int template_args_equal (tree, tree, bool = false); +extern tree maybe_process_partial_specialization (tree); +extern tree most_specialized_instantiation (tree); +extern tree most_specialized_partial_spec (tree, tsubst_flags_t); +extern void print_candidates (tree); +extern void instantiate_pending_templates (int); +extern tree tsubst_default_argument (tree, int, tree, tree, + tsubst_flags_t); +extern tree tsubst (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, + tree, bool = false, bool = false); +extern tree tsubst_expr (tree, tree, tsubst_flags_t, + tree, bool); +extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_argument_pack (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_template_args (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_function_parms (tree, tree, tsubst_flags_t, tree); +extern tree most_general_template (tree); +extern tree get_mostly_instantiated_function_type (tree); +extern bool problematic_instantiation_changed (void); +extern void record_last_problematic_instantiation (void); +extern struct tinst_level *current_instantiation(void); +extern bool instantiating_current_function_p (void); +extern tree maybe_get_template_decl_from_type_decl (tree); +extern int processing_template_parmlist; +extern bool dependent_type_p (tree); +extern bool dependent_scope_p (tree); +extern bool dependentish_scope_p (tree); +extern bool any_dependent_template_arguments_p (const_tree); +extern 
bool any_erroneous_template_args_p (const_tree); +extern bool dependent_template_p (tree); +extern bool dependent_template_id_p (tree, tree); +extern bool type_dependent_expression_p (tree); +extern bool type_dependent_object_expression_p (tree); +extern bool any_type_dependent_arguments_p (const vec *); +extern bool any_type_dependent_elements_p (const_tree); +extern bool type_dependent_expression_p_push (tree); +extern bool value_dependent_expression_p (tree); +extern bool instantiation_dependent_expression_p (tree); +extern bool instantiation_dependent_uneval_expression_p (tree); +extern bool any_value_dependent_elements_p (const_tree); +extern bool dependent_omp_for_p (tree, tree, tree, tree); +extern tree resolve_typename_type (tree, bool); +extern tree template_for_substitution (tree); +extern tree build_non_dependent_expr (tree); +extern void make_args_non_dependent (vec *); +extern bool reregister_specialization (tree, tree, tree); +extern tree instantiate_non_dependent_expr (tree); +extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t); +extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t); +extern tree instantiate_non_dependent_or_null (tree); +extern bool variable_template_specialization_p (tree); +extern bool alias_type_or_template_p (tree); +enum { nt_opaque = false, nt_transparent = true }; +extern tree alias_template_specialization_p (const_tree, bool); +extern tree dependent_alias_template_spec_p (const_tree, bool); +extern bool template_parm_object_p (const_tree); +extern tree tparm_object_argument (tree); +extern bool explicit_class_specialization_p (tree); +extern bool push_tinst_level (tree); +extern bool push_tinst_level (tree, tree); +extern bool push_tinst_level_loc (tree, location_t); +extern bool push_tinst_level_loc (tree, tree, location_t); +extern void pop_tinst_level (void); +extern struct tinst_level *outermost_tinst_level(void); +extern bool non_templated_friend_p (tree); +extern void 
init_template_processing (void); +extern void print_template_statistics (void); +bool template_template_parameter_p (const_tree); +bool template_type_parameter_p (const_tree); +extern bool primary_template_specialization_p (const_tree); +extern tree get_primary_template_innermost_parameters (const_tree); +extern tree get_template_innermost_arguments (const_tree); +extern tree get_template_argument_pack_elems (const_tree); +extern tree get_function_template_decl (const_tree); +extern tree resolve_nondeduced_context (tree, tsubst_flags_t); +extern tree resolve_nondeduced_context_or_error (tree, tsubst_flags_t); +extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val); +extern tree coerce_template_parms (tree, tree, tree); +extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t); +extern tree canonicalize_type_argument (tree, tsubst_flags_t); +extern void register_local_specialization (tree, tree); +extern tree retrieve_local_specialization (tree); +extern tree extract_fnparm_pack (tree, tree *); +extern tree template_parm_to_arg (tree); +extern tree dguide_name (tree); +extern bool dguide_name_p (tree); +extern bool deduction_guide_p (const_tree); +extern bool copy_guide_p (const_tree); +extern bool template_guide_p (const_tree); +extern bool builtin_guide_p (const_tree); +extern void store_explicit_specifier (tree, tree); +extern tree lookup_explicit_specifier (tree); +extern void walk_specializations (bool, + void (*)(bool, spec_entry *, + void *), + void *); +extern tree match_mergeable_specialization (bool is_decl, spec_entry *); +extern unsigned get_mergeable_specialization_flags (tree tmpl, tree spec); +extern void add_mergeable_specialization (bool is_decl, bool is_alias, + spec_entry *, + tree outer, unsigned); +extern tree add_to_template_args (tree, tree); +extern tree add_outermost_template_args (tree, tree); +extern tree add_extra_args (tree, tree, tsubst_flags_t, tree); +extern tree build_extra_args (tree, tree, 
tsubst_flags_t); + +/* in rtti.cc */ +/* A vector of all tinfo decls that haven't been emitted yet. */ +extern GTY(()) vec *unemitted_tinfo_decls; + +extern void init_rtti_processing (void); +extern tree build_typeid (tree, tsubst_flags_t); +extern tree get_tinfo_decl_direct (tree, tree, int); +extern tree get_tinfo_decl (tree); +extern tree get_typeid (tree, tsubst_flags_t); +extern tree build_headof (tree); +extern tree build_dynamic_cast (location_t, tree, tree, + tsubst_flags_t); +extern void emit_support_tinfos (void); +extern bool emit_tinfo_decl (tree); +extern unsigned get_pseudo_tinfo_index (tree); +extern tree get_pseudo_tinfo_type (unsigned); +extern tree build_if_nonnull (tree, tree, tsubst_flags_t); + +/* in search.cc */ +extern tree get_parent_with_private_access (tree decl, tree binfo); +extern bool accessible_base_p (tree, tree, bool); +extern tree lookup_base (tree, tree, base_access, + base_kind *, tsubst_flags_t); +extern tree dcast_base_hint (tree, tree); +extern int accessible_p (tree, tree, bool); +extern int accessible_in_template_p (tree, tree); +extern tree lookup_field (tree, tree, int, bool); +extern tree lookup_fnfields (tree, tree, int, tsubst_flags_t); +extern tree lookup_member (tree, tree, int, bool, + tsubst_flags_t, + access_failure_info *afi = NULL); +extern tree lookup_member_fuzzy (tree, tree, bool); +extern tree locate_field_accessor (tree, tree, bool); +extern int look_for_overrides (tree, tree); +extern void get_pure_virtuals (tree); +extern void maybe_suppress_debug_info (tree); +extern void note_debug_info_needed (tree); +extern tree current_scope (void); +extern int at_function_scope_p (void); +extern bool at_class_scope_p (void); +extern bool at_namespace_scope_p (void); +extern tree context_for_name_lookup (tree); +extern tree lookup_conversions (tree); +extern tree binfo_from_vbase (tree); +extern tree binfo_for_vbase (tree, tree); +extern tree look_for_overrides_here (tree, tree); +#define dfs_skip_bases ((tree)1) 
+extern tree dfs_walk_all (tree, tree (*) (tree, void *), + tree (*) (tree, void *), void *); +extern tree dfs_walk_once (tree, tree (*) (tree, void *), + tree (*) (tree, void *), void *); +extern tree binfo_via_virtual (tree, tree); +extern bool binfo_direct_p (tree); +extern tree build_baselink (tree, tree, tree, tree); +extern tree adjust_result_of_qualified_name_lookup + (tree, tree, tree); +extern tree copied_binfo (tree, tree); +extern tree original_binfo (tree, tree); +extern bool shared_member_p (tree); +extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ()); +extern bool maybe_check_overriding_exception_spec (tree, tree); + +/* in semantics.cc */ +extern void push_deferring_access_checks (deferring_kind); +extern void resume_deferring_access_checks (void); +extern void stop_deferring_access_checks (void); +extern void pop_deferring_access_checks (void); +extern vec *get_deferred_access_checks (void); +extern void reopen_deferring_access_checks (vec *); +extern void pop_to_parent_deferring_access_checks (void); +extern bool perform_access_checks (vec *, + tsubst_flags_t); +extern bool perform_deferred_access_checks (tsubst_flags_t); +extern bool perform_or_defer_access_check (tree, tree, tree, + tsubst_flags_t, + access_failure_info *afi = NULL); + +/* RAII sentinel to ensures that deferred access checks are popped before + a function returns. 
*/ + +class deferring_access_check_sentinel +{ +public: + deferring_access_check_sentinel (enum deferring_kind kind = dk_deferred) + { + push_deferring_access_checks (kind); + } + ~deferring_access_check_sentinel () + { + pop_deferring_access_checks (); + } +}; + +extern int stmts_are_full_exprs_p (void); +extern void init_cp_semantics (void); +extern tree do_poplevel (tree); +extern void break_maybe_infinite_loop (void); +extern void add_decl_expr (tree); +extern tree maybe_cleanup_point_expr_void (tree); +extern tree finish_expr_stmt (tree); +extern tree begin_if_stmt (void); +extern tree finish_if_stmt_cond (tree, tree); +extern tree finish_then_clause (tree); +extern void begin_else_clause (tree); +extern void finish_else_clause (tree); +extern void finish_if_stmt (tree); +extern tree begin_while_stmt (void); +extern void finish_while_stmt_cond (tree, tree, bool, unsigned short); +extern void finish_while_stmt (tree); +extern tree begin_do_stmt (void); +extern void finish_do_body (tree); +extern void finish_do_stmt (tree, tree, bool, unsigned short); +extern tree finish_return_stmt (tree); +extern tree begin_for_scope (tree *); +extern tree begin_for_stmt (tree, tree); +extern void finish_init_stmt (tree); +extern void finish_for_cond (tree, tree, bool, unsigned short); +extern void finish_for_expr (tree, tree); +extern void finish_for_stmt (tree); +extern tree begin_range_for_stmt (tree, tree); +extern void finish_range_for_decl (tree, tree, tree); +extern void finish_range_for_stmt (tree); +extern tree finish_break_stmt (void); +extern tree finish_continue_stmt (void); +extern tree begin_switch_stmt (void); +extern void finish_switch_cond (tree, tree); +extern void finish_switch_stmt (tree); +extern tree finish_goto_stmt (tree); +extern tree begin_try_block (void); +extern void finish_try_block (tree); +extern void finish_handler_sequence (tree); +extern tree begin_function_try_block (tree *); +extern void finish_function_try_block (tree); +extern void 
finish_function_handler_sequence (tree, tree); +extern void finish_cleanup_try_block (tree); +extern tree begin_handler (void); +extern void finish_handler_parms (tree, tree); +extern void finish_handler (tree); +extern void finish_cleanup (tree, tree); +extern bool is_this_parameter (tree); + +enum { + BCS_NORMAL = 0, + BCS_NO_SCOPE = 1, + BCS_TRY_BLOCK = 2, + BCS_FN_BODY = 4, + BCS_TRANSACTION = 8 +}; +extern tree begin_compound_stmt (unsigned int); + +extern void finish_compound_stmt (tree); +extern tree finish_asm_stmt (location_t, int, tree, tree, + tree, tree, tree, bool); +extern tree finish_label_stmt (tree); +extern void finish_label_decl (tree); +extern cp_expr finish_parenthesized_expr (cp_expr); +extern tree force_paren_expr (tree, bool = false); +inline tree force_paren_expr_uneval (tree t) +{ return force_paren_expr (t, true); } +extern tree maybe_undo_parenthesized_ref (tree); +extern tree maybe_strip_ref_conversion (tree); +extern tree finish_non_static_data_member (tree, tree, tree); +extern tree begin_stmt_expr (void); +extern tree finish_stmt_expr_expr (tree, tree); +extern tree finish_stmt_expr (tree, bool); +extern tree stmt_expr_value_expr (tree); +bool empty_expr_stmt_p (tree); +extern cp_expr perform_koenig_lookup (cp_expr, vec *, + tsubst_flags_t); +extern tree finish_call_expr (tree, vec **, bool, + bool, tsubst_flags_t); +extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error); +extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error); +extern cp_expr finish_increment_expr (cp_expr, enum tree_code); +extern tree finish_this_expr (void); +extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t); +extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr, + tsubst_flags_t); +/* Whether this call to finish_compound_literal represents a C++11 functional + cast or a C99 compound literal. 
*/ +enum fcl_t { fcl_functional, fcl_c99 }; +extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional); +extern tree finish_fname (tree); +extern void finish_translation_unit (void); +extern tree finish_template_type_parm (tree, tree); +extern tree finish_template_template_parm (tree, tree); +extern tree begin_class_definition (tree); +extern void finish_template_decl (tree); +extern tree finish_template_type (tree, tree, int); +extern tree finish_base_specifier (tree, tree, bool); +extern void finish_member_declaration (tree); +extern bool outer_automatic_var_p (tree); +extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false); +extern cp_expr finish_id_expression (tree, tree, tree, + cp_id_kind *, + bool, bool, bool *, + bool, bool, bool, bool, + const char **, + location_t); +extern tree finish_typeof (tree); +extern tree finish_underlying_type (tree); +extern tree calculate_bases (tree, tsubst_flags_t); +extern tree finish_bases (tree, bool); +extern tree calculate_direct_bases (tree, tsubst_flags_t); +extern tree finish_offsetof (tree, tree, location_t); +extern void finish_decl_cleanup (tree, tree); +extern void finish_eh_cleanup (tree); +extern void emit_associated_thunks (tree); +extern void finish_mem_initializers (tree); +extern tree check_template_template_default_arg (tree); +extern bool expand_or_defer_fn_1 (tree); +extern void expand_or_defer_fn (tree); +extern bool check_accessibility_of_qualified_id (tree, tree, tree, tsubst_flags_t); +extern tree finish_qualified_id_expr (tree, tree, bool, bool, + bool, bool, tsubst_flags_t); +extern void simplify_aggr_init_expr (tree *); +extern void finalize_nrv (tree *, tree, tree); +extern tree omp_reduction_id (enum tree_code, tree, tree); +extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *); +extern bool cp_check_omp_declare_reduction (tree); +extern void finish_omp_declare_simd_methods (tree); +extern tree finish_omp_clauses (tree, enum 
c_omp_region_type); +extern tree push_omp_privatization_clauses (bool); +extern void pop_omp_privatization_clauses (tree); +extern void save_omp_privatization_clauses (vec &); +extern void restore_omp_privatization_clauses (vec &); +extern void finish_omp_threadprivate (tree); +extern tree begin_omp_structured_block (void); +extern tree finish_omp_structured_block (tree); +extern tree finish_oacc_data (tree, tree); +extern tree finish_oacc_host_data (tree, tree); +extern tree finish_omp_construct (enum tree_code, tree, tree); +extern tree begin_omp_parallel (void); +extern tree finish_omp_parallel (tree, tree); +extern tree begin_omp_task (void); +extern tree finish_omp_task (tree, tree); +extern tree finish_omp_for (location_t, enum tree_code, + tree, tree, tree, tree, tree, + tree, tree, vec *, tree); +extern tree finish_omp_for_block (tree, tree); +extern void finish_omp_atomic (location_t, enum tree_code, + enum tree_code, tree, tree, + tree, tree, tree, tree, tree, + enum omp_memory_order, bool); +extern void finish_omp_barrier (void); +extern void finish_omp_depobj (location_t, tree, + enum omp_clause_depend_kind, + tree); +extern void finish_omp_flush (int); +extern void finish_omp_taskwait (void); +extern void finish_omp_taskyield (void); +extern void finish_omp_cancel (tree); +extern void finish_omp_cancellation_point (tree); +extern tree omp_privatize_field (tree, bool); +extern tree begin_transaction_stmt (location_t, tree *, int); +extern void finish_transaction_stmt (tree, tree, int, tree); +extern tree build_transaction_expr (location_t, tree, int, tree); +extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, + bool, bool); +extern tree baselink_for_fns (tree); +extern void finish_static_assert (tree, tree, location_t, + bool, bool); +extern tree finish_decltype_type (tree, bool, tsubst_flags_t); +extern tree fold_builtin_is_corresponding_member (location_t, int, tree *); +extern tree fold_builtin_is_pointer_inverconvertible_with_class 
(location_t, int, tree *); +extern tree finish_trait_expr (location_t, enum cp_trait_kind, tree, tree); +extern tree build_lambda_expr (void); +extern tree build_lambda_object (tree); +extern tree begin_lambda_type (tree); +extern tree lambda_capture_field_type (tree, bool, bool); +extern tree lambda_proxy_type (tree); +extern tree lambda_function (tree); +extern void apply_deduced_return_type (tree, tree); +extern tree add_capture (tree, tree, tree, bool, bool); +extern tree add_default_capture (tree, tree, tree); +extern void insert_capture_proxy (tree); +extern void insert_pending_capture_proxies (void); +extern bool is_capture_proxy (tree); +extern bool is_normal_capture_proxy (tree); +extern bool is_constant_capture_proxy (tree); +extern void register_capture_members (tree); +extern tree lambda_expr_this_capture (tree, int); +extern void maybe_generic_this_capture (tree, tree); +extern tree maybe_resolve_dummy (tree, bool); +extern tree current_nonlambda_function (void); +extern tree nonlambda_method_basetype (void); +extern tree current_nonlambda_scope (void); +extern tree current_lambda_expr (void); +extern bool generic_lambda_fn_p (tree); +extern tree do_dependent_capture (tree, bool = false); +extern bool lambda_fn_in_template_p (tree); +extern void maybe_add_lambda_conv_op (tree); +extern bool is_lambda_ignored_entity (tree); +extern bool lambda_static_thunk_p (tree); +extern bool call_from_lambda_thunk_p (tree); +extern tree finish_builtin_launder (location_t, tree, + tsubst_flags_t); +extern tree cp_build_vec_convert (tree, location_t, tree, + tsubst_flags_t); +extern tree cp_build_bit_cast (location_t, tree, tree, + tsubst_flags_t); +extern void start_lambda_scope (tree); +extern void record_lambda_scope (tree); +extern void record_null_lambda_scope (tree); +extern void finish_lambda_scope (void); +extern tree start_lambda_function (tree fn, tree lambda_expr); +extern void finish_lambda_function (tree body); +extern bool regenerated_lambda_fn_p (tree); 
+extern tree lambda_regenerating_args (tree); +extern tree most_general_lambda (tree); +extern tree finish_omp_target (location_t, tree, tree, bool); +extern void finish_omp_target_clauses (location_t, tree, tree *); + +/* in tree.cc */ +extern int cp_tree_operand_length (const_tree); +extern int cp_tree_code_length (enum tree_code); +extern void cp_free_lang_data (tree t); +extern tree force_target_expr (tree, tree, tsubst_flags_t); +extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t); +extern void lang_check_failed (const char *, int, + const char *) ATTRIBUTE_NORETURN + ATTRIBUTE_COLD; +extern tree stabilize_expr (tree, tree *); +extern void stabilize_call (tree, tree *); +extern bool stabilize_init (tree, tree *); +extern tree add_stmt_to_compound (tree, tree); +extern void init_tree (void); +extern bool pod_type_p (const_tree); +extern bool layout_pod_type_p (const_tree); +extern bool std_layout_type_p (const_tree); +extern bool trivial_type_p (const_tree); +extern bool trivially_copyable_p (const_tree); +extern bool type_has_unique_obj_representations (const_tree); +extern bool scalarish_type_p (const_tree); +extern bool structural_type_p (tree, bool = false); +extern bool type_has_nontrivial_default_init (const_tree); +extern bool type_has_nontrivial_copy_init (const_tree); +extern void maybe_warn_parm_abi (tree, location_t); +extern bool class_tmpl_impl_spec_p (const_tree); +extern int zero_init_p (const_tree); +extern bool zero_init_expr_p (tree); +extern bool check_abi_tag_redeclaration (const_tree, const_tree, + const_tree); +extern bool check_abi_tag_args (tree, tree); +extern tree strip_typedefs (tree, bool * = NULL, + unsigned int = 0); +extern tree strip_typedefs_expr (tree, bool * = NULL, + unsigned int = 0); +extern tree copy_binfo (tree, tree, tree, + tree *, int); +extern int member_p (const_tree); +extern cp_lvalue_kind real_lvalue_p (const_tree); +extern cp_lvalue_kind lvalue_kind (const_tree); +extern bool glvalue_p 
(const_tree); +extern bool obvalue_p (const_tree); +extern bool xvalue_p (const_tree); +extern bool bitfield_p (const_tree); +extern tree cp_stabilize_reference (tree); +extern bool builtin_valid_in_constant_expr_p (const_tree); +extern tree build_min (enum tree_code, tree, ...); +extern tree build_min_nt_loc (location_t, enum tree_code, + ...); +extern tree build_min_non_dep (enum tree_code, tree, ...); +extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...); +extern tree build_min_non_dep_op_overload (tree, tree, tree, + vec *); +extern tree build_min_nt_call_vec (tree, vec *); +extern tree build_min_non_dep_call_vec (tree, tree, vec *); +extern vec* vec_copy_and_insert (vec*, tree, unsigned); +extern tree build_cplus_new (tree, tree, tsubst_flags_t); +extern tree build_local_temp (tree); +extern bool is_local_temp (tree); +extern tree build_aggr_init_expr (tree, tree); +extern tree get_target_expr (tree); +extern tree get_target_expr_sfinae (tree, tsubst_flags_t); +extern tree build_cplus_array_type (tree, tree, int is_dep = -1); +extern tree build_array_of_n_type (tree, int); +extern bool array_of_runtime_bound_p (tree); +extern bool vla_type_p (tree); +extern tree build_array_copy (tree); +extern tree build_vec_init_expr (tree, tree, tsubst_flags_t); +extern tree expand_vec_init_expr (tree, tree, tsubst_flags_t, + vec** = nullptr); +extern void diagnose_non_constexpr_vec_init (tree); +extern tree hash_tree_cons (tree, tree, tree); +extern tree hash_tree_chain (tree, tree); +extern tree build_qualified_name (tree, tree, tree, bool); +extern tree build_ref_qualified_type (tree, cp_ref_qualifier); +extern tree make_binding_vec (tree, unsigned clusters CXX_MEM_STAT_INFO); +inline tree ovl_first (tree) ATTRIBUTE_PURE; +extern tree ovl_make (tree fn, + tree next = NULL_TREE); +extern tree ovl_insert (tree fn, tree maybe_ovl, + int using_or_hidden = 0); +extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE; +extern void lookup_mark (tree lookup, 
bool val); +extern tree lookup_add (tree fns, tree lookup); +extern tree lookup_maybe_add (tree fns, tree lookup, + bool deduping); +extern int is_overloaded_fn (tree) ATTRIBUTE_PURE; +extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE; +extern tree dependent_name (tree); +extern tree maybe_get_fns (tree) ATTRIBUTE_PURE; +extern tree get_fns (tree) ATTRIBUTE_PURE; +extern tree get_first_fn (tree) ATTRIBUTE_PURE; +extern tree ovl_scope (tree); +extern const char *cxx_printable_name (tree, int); +extern const char *cxx_printable_name_translate (tree, int); +extern tree canonical_eh_spec (tree); +extern tree build_cp_fntype_variant (tree, cp_ref_qualifier, tree, bool); +extern tree build_exception_variant (tree, tree); +extern void fixup_deferred_exception_variants (tree, tree); +extern tree bind_template_template_parm (tree, tree); +extern tree array_type_nelts_total (tree); +extern tree array_type_nelts_top (tree); +extern bool array_of_unknown_bound_p (const_tree); +extern bool source_location_current_p (tree); +extern tree break_out_target_exprs (tree, bool = false); +extern tree build_ctor_subob_ref (tree, tree, tree); +extern tree replace_placeholders (tree, tree, bool * = NULL); +extern bool find_placeholders (tree); +extern tree get_type_decl (tree); +extern tree decl_namespace_context (tree); +extern bool decl_anon_ns_mem_p (const_tree); +extern tree lvalue_type (tree); +extern tree error_type (tree); +extern int varargs_function_p (const_tree); +extern bool cp_tree_equal (tree, tree); +extern tree no_linkage_check (tree, bool); +extern void debug_binfo (tree); +extern tree build_dummy_object (tree); +extern tree maybe_dummy_object (tree, tree *); +extern bool is_dummy_object (const_tree); +extern bool is_byte_access_type (tree); +extern bool is_byte_access_type_not_plain_char (tree); +extern const struct attribute_spec cxx_attribute_table[]; +extern tree make_ptrmem_cst (tree, tree); +extern tree cp_build_type_attribute_variant (tree, tree); +extern tree 
cp_build_reference_type (tree, bool); +extern tree move (tree); +extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); +#define cp_build_qualified_type(TYPE, QUALS) \ + cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) +extern bool cv_qualified_p (const_tree); +extern tree cv_unqualified (tree); +extern special_function_kind special_function_p (const_tree); +extern special_function_kind special_memfn_p (const_tree); +extern int count_trees (tree); +extern int char_type_p (tree); +extern void verify_stmt_tree (tree); +extern linkage_kind decl_linkage (tree); +extern duration_kind decl_storage_duration (tree); +extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, + void*, hash_set *); +#define cp_walk_tree(tp,func,data,pset) \ + walk_tree_1 (tp, func, data, pset, cp_walk_subtrees) +#define cp_walk_tree_without_duplicates(tp,func,data) \ + walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees) +extern tree rvalue (tree); +extern tree convert_bitfield_to_declared_type (tree); +extern tree cp_save_expr (tree); +extern bool cast_valid_in_integral_constant_expression_p (tree); +extern bool cxx_type_hash_eq (const_tree, const_tree); +extern tree cxx_copy_lang_qualifiers (const_tree, const_tree); + +extern void cxx_print_statistics (void); +extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t); + +/* in ptree.cc */ +extern void cxx_print_xnode (FILE *, tree, int); +extern void cxx_print_decl (FILE *, tree, int); +extern void cxx_print_type (FILE *, tree, int); +extern void cxx_print_identifier (FILE *, tree, int); +extern void cxx_print_error_function (diagnostic_context *, + const char *, + struct diagnostic_info *); + +/* in typeck.cc */ +/* Says how we should behave when comparing two arrays one of which + has unknown bounds. 
*/ +enum compare_bounds_t { bounds_none, bounds_either, bounds_first }; + +extern bool cxx_mark_addressable (tree, bool = false); +extern int string_conv_p (const_tree, const_tree, int); +extern tree cp_truthvalue_conversion (tree, tsubst_flags_t); +extern tree contextual_conv_bool (tree, tsubst_flags_t); +extern tree condition_conversion (tree); +extern tree require_complete_type (tree); +extern tree require_complete_type_sfinae (tree, tsubst_flags_t); +extern tree complete_type (tree); +extern tree complete_type_or_else (tree, tree); +extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); +inline bool type_unknown_p (const_tree); +enum { ce_derived, ce_type, ce_normal, ce_exact }; +extern bool comp_except_specs (const_tree, const_tree, int); +extern bool comptypes (tree, tree, int); +extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); +extern bool similar_type_p (tree, tree); +extern bool next_common_initial_seqence (tree &, tree &); +extern bool layout_compatible_type_p (tree, tree); +extern bool compparms (const_tree, const_tree); +extern int comp_cv_qualification (const_tree, const_tree); +extern int comp_cv_qualification (int, int); +extern int comp_cv_qual_signature (tree, tree); +extern tree cxx_sizeof_or_alignof_expr (location_t, tree, + enum tree_code, bool, bool); +extern tree cxx_sizeof_or_alignof_type (location_t, tree, + enum tree_code, bool, bool); +extern tree cxx_alignas_expr (tree); +extern tree cxx_sizeof_nowarn (tree); +extern tree is_bitfield_expr_with_lowered_type (const_tree); +extern tree unlowered_expr_type (const_tree); +extern tree decay_conversion (tree, + tsubst_flags_t, + bool = true); +extern tree build_class_member_access_expr (cp_expr, tree, tree, bool, + tsubst_flags_t); +extern tree finish_class_member_access_expr (cp_expr, tree, bool, + tsubst_flags_t); +extern tree lookup_destructor (tree, tree, tree, tsubst_flags_t); +extern tree build_dependent_operator_type (tree, enum tree_code, bool); 
+extern tree build_x_indirect_ref (location_t, tree, + ref_operator, tree, + tsubst_flags_t); +extern tree cp_build_indirect_ref (location_t, tree, + ref_operator, + tsubst_flags_t); +extern tree cp_build_fold_indirect_ref (tree); +extern tree build_array_ref (location_t, tree, tree); +extern tree cp_build_array_ref (location_t, tree, tree, + tsubst_flags_t); +extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t); +extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) + ATTRIBUTE_SENTINEL; +extern tree cp_build_function_call_vec (tree, vec **, + tsubst_flags_t, + tree = NULL_TREE); +extern tree build_x_binary_op (const op_location_t &, + enum tree_code, tree, + enum tree_code, tree, + enum tree_code, tree, + tree *, tsubst_flags_t); +inline tree build_x_binary_op (const op_location_t &loc, + enum tree_code code, tree arg1, tree arg2, + tsubst_flags_t complain) +{ + return build_x_binary_op (loc, code, arg1, TREE_CODE (arg1), arg2, + TREE_CODE (arg2), NULL_TREE, NULL, complain); +} +extern tree build_x_array_ref (location_t, tree, tree, + tsubst_flags_t); +extern tree build_x_unary_op (location_t, + enum tree_code, cp_expr, + tree, tsubst_flags_t); +extern tree cp_build_addressof (location_t, tree, + tsubst_flags_t); +extern tree cp_build_addr_expr (tree, tsubst_flags_t); +extern tree cp_build_unary_op (enum tree_code, tree, bool, + tsubst_flags_t); +extern tree genericize_compound_lvalue (tree); +extern tree unary_complex_lvalue (enum tree_code, tree); +extern tree build_x_conditional_expr (location_t, tree, tree, tree, + tsubst_flags_t); +extern tree build_x_compound_expr_from_list (tree, expr_list_kind, + tsubst_flags_t); +extern tree build_x_compound_expr_from_vec (vec *, + const char *, tsubst_flags_t); +extern tree build_x_compound_expr (location_t, tree, tree, + tree, tsubst_flags_t); +extern tree build_compound_expr (location_t, tree, tree); +extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); +extern tree 
build_static_cast (location_t, tree, tree, + tsubst_flags_t); +extern tree build_reinterpret_cast (location_t, tree, tree, + tsubst_flags_t); +extern tree build_const_cast (location_t, tree, tree, + tsubst_flags_t); +extern tree build_c_cast (location_t, tree, tree); +extern cp_expr build_c_cast (location_t loc, tree type, + cp_expr expr); +extern tree cp_build_c_cast (location_t, tree, tree, + tsubst_flags_t); +extern cp_expr build_x_modify_expr (location_t, tree, + enum tree_code, tree, + tree, tsubst_flags_t); +extern tree cp_build_modify_expr (location_t, tree, + enum tree_code, tree, + tsubst_flags_t); +extern tree convert_for_initialization (tree, tree, tree, int, + impl_conv_rhs, tree, int, + tsubst_flags_t); +extern int comp_ptr_ttypes (tree, tree); +extern bool comp_ptr_ttypes_const (tree, tree, compare_bounds_t); +extern bool error_type_p (const_tree); +extern bool ptr_reasonably_similar (const_tree, const_tree); +extern tree build_ptrmemfunc (tree, tree, int, bool, + tsubst_flags_t); +extern int cp_type_quals (const_tree); +extern int type_memfn_quals (const_tree); +extern cp_ref_qualifier type_memfn_rqual (const_tree); +extern tree apply_memfn_quals (tree, cp_cv_quals, + cp_ref_qualifier = REF_QUAL_NONE); +extern bool cp_has_mutable_p (const_tree); +extern bool at_least_as_qualified_p (const_tree, const_tree); +extern void cp_apply_type_quals_to_decl (int, tree); +extern tree build_ptrmemfunc1 (tree, tree, tree); +extern void expand_ptrmemfunc_cst (tree, tree *, tree *); +extern tree type_after_usual_arithmetic_conversions (tree, tree); +extern tree common_pointer_type (tree, tree); +extern tree composite_pointer_type (const op_location_t &, + tree, tree, tree, tree, + composite_pointer_operation, + tsubst_flags_t); +extern tree merge_types (tree, tree); +extern tree strip_array_domain (tree); +extern tree check_return_expr (tree, bool *); +extern tree spaceship_type (tree, tsubst_flags_t = tf_warning_or_error); +extern tree genericize_spaceship 
(location_t, tree, tree, tree); +extern tree cp_build_binary_op (const op_location_t &, + enum tree_code, tree, tree, + tsubst_flags_t); +extern tree build_x_vec_perm_expr (location_t, + tree, tree, tree, + tsubst_flags_t); +extern tree build_x_shufflevector (location_t, + vec *, + tsubst_flags_t); +#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (input_location, T, SIZEOF_EXPR, false, true) +extern tree build_simple_component_ref (tree, tree); +extern tree build_ptrmemfunc_access_expr (tree, tree); +extern tree build_address (tree); +extern tree build_nop (tree, tree); +extern tree non_reference (tree); +extern tree lookup_anon_field (tree, tree); +extern bool invalid_nonstatic_memfn_p (location_t, tree, + tsubst_flags_t); +extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t); +extern tree convert_ptrmem (tree, tree, bool, bool, + tsubst_flags_t); +extern int lvalue_or_else (tree, enum lvalue_use, + tsubst_flags_t); +extern void check_template_keyword (tree); +extern bool check_raw_literal_operator (const_tree decl); +extern bool check_literal_operator_args (const_tree, bool *, bool *); +extern void maybe_warn_about_useless_cast (location_t, tree, tree, + tsubst_flags_t); +extern tree cp_perform_integral_promotions (tree, tsubst_flags_t); + +extern tree finish_left_unary_fold_expr (tree, int); +extern tree finish_right_unary_fold_expr (tree, int); +extern tree finish_binary_fold_expr (tree, tree, int); +extern tree treat_lvalue_as_rvalue_p (tree, bool); +extern bool decl_in_std_namespace_p (tree); + +/* in typeck2.cc */ +extern void require_complete_eh_spec_types (tree, tree); +extern void cxx_incomplete_type_diagnostic (location_t, const_tree, + const_tree, diagnostic_t); +inline location_t +loc_or_input_loc (location_t loc) +{ + return loc == UNKNOWN_LOCATION ? 
input_location : loc; +} + +inline location_t +cp_expr_loc_or_loc (const_tree t, location_t or_loc) +{ + location_t loc = cp_expr_location (t); + if (loc == UNKNOWN_LOCATION) + loc = or_loc; + return loc; +} + +inline location_t +cp_expr_loc_or_input_loc (const_tree t) +{ + return cp_expr_loc_or_loc (t, input_location); +} + +inline void +cxx_incomplete_type_diagnostic (const_tree value, const_tree type, + diagnostic_t diag_kind) +{ + cxx_incomplete_type_diagnostic (cp_expr_loc_or_input_loc (value), + value, type, diag_kind); +} + +extern void cxx_incomplete_type_error (location_t, const_tree, + const_tree); +inline void +cxx_incomplete_type_error (const_tree value, const_tree type) +{ + cxx_incomplete_type_diagnostic (value, type, DK_ERROR); +} + +extern void cxx_incomplete_type_inform (const_tree); +extern tree error_not_base_type (tree, tree); +extern tree binfo_or_else (tree, tree); +extern void cxx_readonly_error (location_t, tree, + enum lvalue_use); +extern void complete_type_check_abstract (tree); +extern int abstract_virtuals_error (tree, tree); +extern int abstract_virtuals_error (abstract_class_use, tree); +extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t); +extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t); + +extern tree store_init_value (tree, tree, vec**, int); +extern tree split_nonconstant_init (tree, tree); +extern bool check_narrowing (tree, tree, tsubst_flags_t, + bool = false); +extern bool ordinary_char_type_p (tree); +extern bool array_string_literal_compatible_p (tree, tree); +extern tree digest_init (tree, tree, tsubst_flags_t); +extern tree digest_init_flags (tree, tree, int, tsubst_flags_t); +extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t); +extern tree build_scoped_ref (tree, tree, tree *); +extern tree build_x_arrow (location_t, tree, + tsubst_flags_t); +extern tree build_m_component_ref (tree, tree, tsubst_flags_t); +extern tree build_functional_cast (location_t, tree, 
tree, + tsubst_flags_t); +extern tree add_exception_specifier (tree, tree, tsubst_flags_t); +extern tree merge_exception_specifiers (tree, tree); + +/* in mangle.cc */ +extern void init_mangle (void); +extern void mangle_decl (tree); +extern const char *mangle_type_string (tree); +extern tree mangle_typeinfo_for_type (tree); +extern tree mangle_typeinfo_string_for_type (tree); +extern tree mangle_vtbl_for_type (tree); +extern tree mangle_vtt_for_type (tree); +extern tree mangle_ctor_vtbl_for_type (tree, tree); +extern tree mangle_thunk (tree, int, tree, tree, tree); +extern tree mangle_guard_variable (tree); +extern tree mangle_tls_init_fn (tree); +extern tree mangle_tls_wrapper_fn (tree); +extern bool decl_tls_wrapper_p (tree); +extern tree mangle_ref_init_variable (tree); +extern tree mangle_template_parm_object (tree); +extern char *get_mangled_vtable_map_var_name (tree); +extern bool mangle_return_type_p (tree); +extern tree mangle_decomp (tree, vec &); +extern void mangle_module_substitution (int); +extern int mangle_module_component (tree id, bool partition); +extern tree mangle_module_global_init (int); + +/* in dump.cc */ +extern bool cp_dump_tree (void *, tree); + +/* In cp/cp-objcp-common.cc. 
*/ + +extern alias_set_type cxx_get_alias_set (tree); +extern bool cxx_warn_unused_global_decl (const_tree); +extern size_t cp_tree_size (enum tree_code); +extern bool cp_var_mod_type_p (tree, tree); +extern void cxx_initialize_diagnostics (diagnostic_context *); +extern int cxx_types_compatible_p (tree, tree); +extern bool cxx_block_may_fallthru (const_tree); + +/* in cp-gimplify.cc */ +extern int cp_gimplify_expr (tree *, gimple_seq *, + gimple_seq *); +extern void cp_genericize (tree); +extern bool cxx_omp_const_qual_no_mutable (tree); +extern enum omp_clause_default_kind cxx_omp_predetermined_sharing_1 (tree); +extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); +extern enum omp_clause_defaultmap_kind cxx_omp_predetermined_mapping (tree); +extern tree cxx_omp_clause_default_ctor (tree, tree, tree); +extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); +extern tree cxx_omp_clause_assign_op (tree, tree, tree); +extern tree cxx_omp_clause_dtor (tree, tree); +extern void cxx_omp_finish_clause (tree, gimple_seq *, bool); +extern bool cxx_omp_privatize_by_reference (const_tree); +extern bool cxx_omp_disregard_value_expr (tree, bool); +extern void cp_fold_function (tree); +extern tree cp_fold_maybe_rvalue (tree, bool); +extern tree cp_fold_rvalue (tree); +extern tree cp_fully_fold (tree); +extern tree cp_fully_fold_init (tree); +extern tree predeclare_vla (tree); +extern void clear_fold_cache (void); +extern tree lookup_hotness_attribute (tree); +extern tree process_stmt_hotness_attribute (tree, location_t); +extern bool simple_empty_class_p (tree, tree, tree_code); +extern tree fold_builtin_source_location (location_t); + +/* in name-lookup.cc */ +extern tree strip_using_decl (tree); +extern void diagnose_name_conflict (tree, tree); +extern bool dependent_local_decl_p (tree); + +/* Tell the binding oracle what kind of binding we are looking for. 
*/ + +enum cp_oracle_request +{ + CP_ORACLE_IDENTIFIER +}; + +/* If this is non-NULL, then it is a "binding oracle" which can lazily + create bindings when needed by the C compiler. The oracle is told + the name and type of the binding to create. It can call pushdecl + or the like to ensure the binding is visible; or do nothing, + leaving the binding untouched. c-decl.cc takes note of when the + oracle has been called and will not call it again if it fails to + create a given binding. */ + +typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier); + +extern cp_binding_oracle_function *cp_binding_oracle; + +/* Set during diagnostics to record the failed constraint. This is a + TREE_LIST whose VALUE is the constraint and whose PURPOSE are the + instantiation arguments Defined in pt.cc. */ + +extern tree current_failed_constraint; + +/* An RAII class to manage the failed constraint. */ + +struct diagnosing_failed_constraint +{ + diagnosing_failed_constraint (tree, tree, bool); + ~diagnosing_failed_constraint (); + static bool replay_errors_p (); + + bool diagnosing_error; +}; + +/* in constraint.cc */ + +extern cp_expr finish_constraint_or_expr (location_t, cp_expr, cp_expr); +extern cp_expr finish_constraint_and_expr (location_t, cp_expr, cp_expr); +extern cp_expr finish_constraint_primary_expr (cp_expr); +extern tree finish_concept_definition (cp_expr, tree); +extern tree combine_constraint_expressions (tree, tree); +extern tree append_constraint (tree, tree); +extern tree get_constraints (const_tree); +extern void set_constraints (tree, tree); +extern void remove_constraints (tree); +extern tree current_template_constraints (void); +extern tree associate_classtype_constraints (tree); +extern tree build_constraints (tree, tree); +extern tree maybe_substitute_reqs_for (tree, const_tree); +extern tree get_trailing_function_requirements (tree); +extern tree get_shorthand_constraints (tree); + +extern tree build_concept_id (tree); +extern tree 
build_type_constraint (tree, tree, tsubst_flags_t); +extern tree build_concept_check (tree, tree, tsubst_flags_t); +extern tree build_concept_check (tree, tree, tree, tsubst_flags_t); + +extern tree_pair finish_type_constraints (tree, tree, tsubst_flags_t); +extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE); +extern void placeholder_extract_concept_and_args (tree, tree&, tree&); +extern bool equivalent_placeholder_constraints (tree, tree); +extern hashval_t hash_placeholder_constraint (tree); +extern bool deduce_constrained_parameter (tree, tree&, tree&); +extern tree resolve_constraint_check (tree); +extern tree check_function_concept (tree); +extern tree finish_template_introduction (tree, tree, location_t loc); +extern bool valid_requirements_p (tree); +extern tree finish_concept_name (tree); +extern tree finish_shorthand_constraint (tree, tree); +extern tree finish_requires_expr (location_t, tree, tree); +extern tree finish_simple_requirement (location_t, tree); +extern tree finish_type_requirement (location_t, tree); +extern tree finish_compound_requirement (location_t, tree, tree, bool); +extern tree finish_nested_requirement (location_t, tree); +extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree); +extern tree evaluate_requires_expr (tree); +extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree); +extern tree tsubst_parameter_mapping (tree, tree, tsubst_flags_t, tree); + +struct processing_constraint_expression_sentinel +{ + processing_constraint_expression_sentinel (); + ~processing_constraint_expression_sentinel (); +}; + +extern bool processing_constraint_expression_p (); + +extern tree unpack_concept_check (tree); +extern tree evaluate_concept_check (tree); +extern bool constraints_satisfied_p (tree, tree = NULL_TREE); +extern bool* lookup_subsumption_result (tree, tree); +extern bool save_subsumption_result (tree, tree, bool); +extern 
tree find_template_parameters (tree, tree); +extern bool equivalent_constraints (tree, tree); +extern bool equivalently_constrained (tree, tree); +extern bool strictly_subsumes (tree, tree); +extern bool weakly_subsumes (tree, tree); +extern int more_constrained (tree, tree); +extern bool at_least_as_constrained (tree, tree); +extern bool constraints_equivalent_p (tree, tree); +extern bool atomic_constraints_identical_p (tree, tree); +extern hashval_t iterative_hash_constraint (tree, hashval_t); +extern hashval_t hash_atomic_constraint (tree); +extern void diagnose_constraints (location_t, tree, tree); + +extern void note_failed_type_completion_for_satisfaction (tree); + +/* A structural hasher for ATOMIC_CONSTRs. */ + +struct atom_hasher : default_hash_traits +{ + static hashval_t hash (tree t) + { + return hash_atomic_constraint (t); + } + + static bool equal (tree t1, tree t2) + { + return atomic_constraints_identical_p (t1, t2); + } +}; + +/* in logic.cc */ +extern bool subsumes (tree, tree); + +/* In class.cc */ +extern void set_current_access_from_decl (tree); +extern void cp_finish_injected_record_type (tree); + +/* in vtable-class-hierarchy.cc */ +extern void vtv_compute_class_hierarchy_transitive_closure (void); +extern void vtv_generate_init_routine (void); +extern void vtv_save_class_info (tree); +extern void vtv_recover_class_info (void); +extern void vtv_build_vtable_verify_fndecl (void); + +/* In constexpr.cc */ +/* Representation of entries in the constexpr function definition table. 
*/ + +struct GTY((for_user)) constexpr_fundef { + tree decl; + tree body; + tree parms; + tree result; +}; + +extern void fini_constexpr (void); +extern bool literal_type_p (tree); +extern void maybe_save_constexpr_fundef (tree); +extern void register_constexpr_fundef (const constexpr_fundef &); +extern constexpr_fundef *retrieve_constexpr_fundef (tree); +extern bool is_valid_constexpr_fn (tree, bool); +extern bool check_constexpr_ctor_body (tree, tree, bool); +extern tree constexpr_fn_retval (tree); +extern tree ensure_literal_type_for_constexpr_object (tree); +extern bool potential_constant_expression (tree); +extern bool is_constant_expression (tree); +extern bool is_rvalue_constant_expression (tree); +extern bool is_nondependent_constant_expression (tree); +extern bool is_nondependent_static_init_expression (tree); +extern bool is_static_init_expression (tree); +extern bool potential_rvalue_constant_expression (tree); +extern bool require_potential_constant_expression (tree); +extern bool require_constant_expression (tree); +extern bool require_rvalue_constant_expression (tree); +extern bool require_potential_rvalue_constant_expression (tree); +extern tree cxx_constant_value (tree, tree = NULL_TREE); +extern tree cxx_constant_value_sfinae (tree, tree, tsubst_flags_t); +extern void cxx_constant_dtor (tree, tree); +extern tree cxx_constant_init (tree, tree = NULL_TREE); +extern tree maybe_constant_value (tree, tree = NULL_TREE, bool = false); +extern tree maybe_constant_init (tree, tree = NULL_TREE, bool = false); +extern tree fold_non_dependent_expr (tree, + tsubst_flags_t = tf_warning_or_error, + bool = false, tree = NULL_TREE); +extern tree maybe_fold_non_dependent_expr (tree, + tsubst_flags_t = tf_warning_or_error); +extern tree fold_non_dependent_init (tree, + tsubst_flags_t = tf_warning_or_error, + bool = false, tree = NULL_TREE); +extern tree fold_simple (tree); +extern bool reduced_constant_expression_p (tree); +extern bool is_instantiation_of_constexpr 
(tree); +extern bool var_in_constexpr_fn (tree); +extern bool var_in_maybe_constexpr_fn (tree); +extern bool maybe_constexpr_fn (tree); +extern void explain_invalid_constexpr_fn (tree); +extern vec cx_error_context (void); +extern tree fold_sizeof_expr (tree); +extern void clear_cv_and_fold_caches (void); +extern tree unshare_constructor (tree CXX_MEM_STAT_INFO); +extern bool decl_implicit_constexpr_p (tree); +extern bool replace_decl (tree *, tree, tree); + +/* An RAII sentinel used to restrict constexpr evaluation so that it + doesn't do anything that causes extra DECL_UID generation. */ + +struct uid_sensitive_constexpr_evaluation_sentinel +{ + temp_override ovr; + uid_sensitive_constexpr_evaluation_sentinel (); +}; + +/* Used to determine whether uid_sensitive_constexpr_evaluation_p was + called and returned true, indicating that we've restricted constexpr + evaluation in order to avoid UID generation. We use this to control + updates to the fold_cache and cv_cache. */ + +struct uid_sensitive_constexpr_evaluation_checker +{ + const unsigned saved_counter; + uid_sensitive_constexpr_evaluation_checker (); + bool evaluation_restricted_p () const; +}; + +void cp_tree_c_finish_parsing (); + +/* In cp-ubsan.cc */ +extern void cp_ubsan_maybe_instrument_member_call (tree); +extern void cp_ubsan_instrument_member_accesses (tree *); +extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree); +extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree); +extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree); + +/* In coroutines.cc */ +extern tree finish_co_return_stmt (location_t, tree); +extern tree finish_co_await_expr (location_t, tree); +extern tree finish_co_yield_expr (location_t, tree); +extern tree coro_validate_builtin_call (tree, + tsubst_flags_t = tf_warning_or_error); +extern bool morph_fn_to_coro (tree, tree *, tree *); +extern tree coro_get_actor_function (tree); +extern tree coro_get_destroy_function (tree); +extern 
tree coro_get_ramp_function (tree); + +/* Inline bodies. */ + +inline tree +ovl_first (tree node) +{ + while (TREE_CODE (node) == OVERLOAD) + node = OVL_FUNCTION (node); + return node; +} + +inline bool +type_unknown_p (const_tree expr) +{ + return TREE_TYPE (expr) == unknown_type_node; +} + +inline hashval_t +named_decl_hash::hash (const value_type decl) +{ + tree name = (TREE_CODE (decl) == BINDING_VECTOR + ? BINDING_VECTOR_NAME (decl) : OVL_NAME (decl)); + return name ? IDENTIFIER_HASH_VALUE (name) : 0; +} + +inline bool +named_decl_hash::equal (const value_type existing, compare_type candidate) +{ + tree name = (TREE_CODE (existing) == BINDING_VECTOR + ? BINDING_VECTOR_NAME (existing) : OVL_NAME (existing)); + return candidate == name; +} + +inline bool +null_node_p (const_tree expr) +{ + STRIP_ANY_LOCATION_WRAPPER (expr); + return expr == null_node; +} + +/* True iff T is a variable template declaration. */ +inline bool +variable_template_p (tree t) +{ + if (TREE_CODE (t) != TEMPLATE_DECL) + return false; + if (!PRIMARY_TEMPLATE_P (t)) + return false; + if (tree r = DECL_TEMPLATE_RESULT (t)) + return VAR_P (r); + return false; +} + +/* True iff T is a standard concept definition. This will return + true for both the template and underlying declaration. */ + +inline bool +standard_concept_p (tree t) +{ + if (TREE_CODE (t) == TEMPLATE_DECL) + t = DECL_TEMPLATE_RESULT (t); + return TREE_CODE (t) == CONCEPT_DECL; +} + +/* True iff T is a variable concept definition. This will return + true for both the template and the underlying declaration. */ + +inline bool +variable_concept_p (tree t) +{ + if (TREE_CODE (t) == TEMPLATE_DECL) + t = DECL_TEMPLATE_RESULT (t); + return VAR_P (t) && DECL_DECLARED_CONCEPT_P (t); +} + +/* True iff T is a function concept definition or an overload set + containing multiple function concepts. This will return true for + both the template and the underlying declaration. 
*/ + +inline bool +function_concept_p (tree t) +{ + if (TREE_CODE (t) == OVERLOAD) + t = OVL_FIRST (t); + if (TREE_CODE (t) == TEMPLATE_DECL) + t = DECL_TEMPLATE_RESULT (t); + return TREE_CODE (t) == FUNCTION_DECL && DECL_DECLARED_CONCEPT_P (t); +} + +/* True iff T is a standard, variable, or function concept. */ + +inline bool +concept_definition_p (tree t) +{ + if (t == error_mark_node) + return false; + + /* Adjust for function concept overloads. */ + if (TREE_CODE (t) == OVERLOAD) + t = OVL_FIRST (t); + + /* See through templates. */ + if (TREE_CODE (t) == TEMPLATE_DECL) + t = DECL_TEMPLATE_RESULT (t); + + /* The obvious and easy case. */ + if (TREE_CODE (t) == CONCEPT_DECL) + return true; + + /* Definitely not a concept. */ + if (!VAR_OR_FUNCTION_DECL_P (t)) + return false; + if (!DECL_LANG_SPECIFIC (t)) + return false; + + return DECL_DECLARED_CONCEPT_P (t); +} + +/* Same as above, but for const trees. */ + +inline bool +concept_definition_p (const_tree t) +{ + return concept_definition_p (const_cast (t)); +} + +/* True if t is an expression that checks a concept. */ + +inline bool +concept_check_p (const_tree t) +{ + if (TREE_CODE (t) == CALL_EXPR) + t = CALL_EXPR_FN (t); + if (t && TREE_CODE (t) == TEMPLATE_ID_EXPR) + return concept_definition_p (TREE_OPERAND (t, 0)); + return false; +} + +/* Helpers for IMPLICIT_RVALUE_P to look through automatic dereference. */ + +inline bool +implicit_rvalue_p (const_tree t) +{ + if (REFERENCE_REF_P (t)) + t = TREE_OPERAND (t, 0); + return ((TREE_CODE (t) == NON_LVALUE_EXPR + || TREE_CODE (t) == STATIC_CAST_EXPR) + && IMPLICIT_RVALUE_P (t)); +} +inline tree +set_implicit_rvalue_p (tree ot) +{ + tree t = ot; + if (REFERENCE_REF_P (t)) + t = TREE_OPERAND (t, 0); + IMPLICIT_RVALUE_P (t) = 1; + return ot; +} + +/* True if t is a "constrained auto" type-specifier. 
*/ + +inline bool +is_constrained_auto (const_tree t) +{ + return is_auto (t) && PLACEHOLDER_TYPE_CONSTRAINTS_INFO (t); +} + +/* True if CODE, a tree code, denotes a tree whose operand is not evaluated + as per [expr.context], i.e., an operand to sizeof, typeof, decltype, or + alignof. */ + +inline bool +unevaluated_p (tree_code code) +{ + return (code == DECLTYPE_TYPE + || code == ALIGNOF_EXPR + || code == SIZEOF_EXPR + || code == NOEXCEPT_EXPR + || code == REQUIRES_EXPR); +} + +/* RAII class to push/pop the access scope for T. */ + +struct push_access_scope_guard +{ + tree decl; + push_access_scope_guard (tree t) + : decl (t) + { + if (VAR_OR_FUNCTION_DECL_P (decl) + || TREE_CODE (decl) == TYPE_DECL) + push_access_scope (decl); + else + decl = NULL_TREE; + } + ~push_access_scope_guard () + { + if (decl) + pop_access_scope (decl); + } +}; + +#if CHECKING_P +namespace selftest { + extern void run_cp_tests (void); + + /* Declarations for specific families of tests within cp, + by source file, in alphabetical order. */ + extern void cp_pt_cc_tests (); + extern void cp_tree_cc_tests (void); +} // namespace selftest +#endif /* #if CHECKING_P */ + +/* -- end of C++ */ + +#endif /* ! GCC_CP_TREE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cxx-pretty-print.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cxx-pretty-print.h new file mode 100644 index 0000000..920af64 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/cxx-pretty-print.h @@ -0,0 +1,117 @@ +/* Interface for the GNU C++ pretty-printer. + Copyright (C) 2003-2022 Free Software Foundation, Inc. + Contributed by Gabriel Dos Reis + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CXX_PRETTY_PRINT_H +#define GCC_CXX_PRETTY_PRINT_H + +#include "c-family/c-pretty-print.h" + +enum cxx_pretty_printer_flags +{ + /* Ask for a qualified-id. */ + pp_cxx_flag_default_argument = 1 << pp_c_flag_last_bit +}; + +class cxx_pretty_printer : public c_pretty_printer +{ +public: + cxx_pretty_printer (); + + pretty_printer *clone () const OVERRIDE; + + void constant (tree); + void id_expression (tree); + void primary_expression (tree); + void postfix_expression (tree); + void unary_expression (tree); + void multiplicative_expression (tree); + void conditional_expression (tree); + void assignment_expression (tree); + void expression (tree); + void type_id (tree); + void statement (tree); + void declaration (tree); + void declaration_specifiers (tree); + void simple_type_specifier (tree); + void function_specifier (tree); + void declarator (tree); + void direct_declarator (tree); + void abstract_declarator (tree); + void direct_abstract_declarator (tree); + + /* This is the enclosing scope of the entity being pretty-printed. 
*/ + tree enclosing_scope; +}; + +#define pp_cxx_cv_qualifier_seq(PP, T) \ + pp_c_type_qualifier_list (PP, T) +#define pp_cxx_cv_qualifiers(PP, CV, FT) \ + pp_c_cv_qualifiers (PP, CV, FT) + +#define pp_cxx_whitespace(PP) pp_c_whitespace (PP) +#define pp_cxx_left_paren(PP) pp_c_left_paren (PP) +#define pp_cxx_right_paren(PP) pp_c_right_paren (PP) +#define pp_cxx_left_brace(PP) pp_c_left_brace (PP) +#define pp_cxx_right_brace(PP) pp_c_right_brace (PP) +#define pp_cxx_left_bracket(PP) pp_c_left_bracket (PP) +#define pp_cxx_right_bracket(PP) pp_c_right_bracket (PP) +#define pp_cxx_dot(PP) pp_c_dot (PP) +#define pp_cxx_ampersand(PP) pp_c_ampersand (PP) +#define pp_cxx_star(PP) pp_c_star (PP) +#define pp_cxx_arrow(PP) pp_c_arrow (PP) +#define pp_cxx_semicolon(PP) pp_c_semicolon (PP) +#define pp_cxx_complement(PP) pp_c_complement (PP) + +#define pp_cxx_ws_string(PP, I) pp_c_ws_string (PP, I) +#define pp_cxx_identifier(PP, I) pp_c_identifier (PP, I) +#define pp_cxx_tree_identifier(PP, T) \ + pp_c_tree_identifier (PP, T) + +void pp_cxx_begin_template_argument_list (cxx_pretty_printer *); +void pp_cxx_end_template_argument_list (cxx_pretty_printer *); +void pp_cxx_colon_colon (cxx_pretty_printer *); +void pp_cxx_separate_with (cxx_pretty_printer *, int); + +void pp_cxx_canonical_template_parameter (cxx_pretty_printer *, tree); +void pp_cxx_trait_expression (cxx_pretty_printer *, tree); +void pp_cxx_va_arg_expression (cxx_pretty_printer *, tree); +void pp_cxx_offsetof_expression (cxx_pretty_printer *, tree); +void pp_cxx_addressof_expression (cxx_pretty_printer *, tree); +void pp_cxx_userdef_literal (cxx_pretty_printer *, tree); +void pp_cxx_requires_clause (cxx_pretty_printer *, tree); +void pp_cxx_requires_expr (cxx_pretty_printer *, tree); +void pp_cxx_simple_requirement (cxx_pretty_printer *, tree); +void pp_cxx_type_requirement (cxx_pretty_printer *, tree); +void pp_cxx_compound_requirement (cxx_pretty_printer *, tree); +void pp_cxx_nested_requirement (cxx_pretty_printer 
*, tree); +void pp_cxx_predicate_constraint (cxx_pretty_printer *, tree); +void pp_cxx_expression_constraint (cxx_pretty_printer *, tree); +void pp_cxx_type_constraint (cxx_pretty_printer *, tree); +void pp_cxx_implicit_conversion_constraint (cxx_pretty_printer *, tree); +void pp_cxx_argument_deduction_constraint (cxx_pretty_printer *, tree); +void pp_cxx_exception_constraint (cxx_pretty_printer *, tree); +void pp_cxx_parameterized_constraint (cxx_pretty_printer *, tree); +void pp_cxx_conjunction (cxx_pretty_printer *, tree); +void pp_cxx_disjunction (cxx_pretty_printer *, tree); +void pp_cxx_constraint (cxx_pretty_printer *, tree); +void pp_cxx_constrained_type_spec (cxx_pretty_printer *, tree); +void pp_cxx_parameter_mapping (cxx_pretty_printer *, tree); + +#endif /* GCC_CXX_PRETTY_PRINT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/name-lookup.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/name-lookup.h new file mode 100644 index 0000000..fa03902 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/name-lookup.h @@ -0,0 +1,502 @@ +/* Declarations for -*- C++ -*- name lookup routines. + Copyright (C) 2003-2022 Free Software Foundation, Inc. + Contributed by Gabriel Dos Reis + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CP_NAME_LOOKUP_H +#define GCC_CP_NAME_LOOKUP_H + +#include "c-family/c-common.h" + + +/* The datatype used to implement C++ scope. 
*/ +struct cp_binding_level; + +/* Nonzero if this binding is for a local scope, as opposed to a class + or namespace scope. */ +#define LOCAL_BINDING_P(NODE) ((NODE)->is_local) + +/* True if NODE->value is from a base class of the class which is + currently being defined. */ +#define INHERITED_VALUE_BINDING_P(NODE) ((NODE)->value_is_inherited) + +/* The IMPLICIT_TYPEDEF is hidden from ordinary name lookup (it was + injected via a local class's friend decl). The typdef may be in the + VALUE or the TYPE slot. We do not get the situation where the + value and type slots are both filled and both hidden. */ +#define HIDDEN_TYPE_BINDING_P(NODE) ((NODE)->type_is_hidden) + +/* Datatype that represents binding established by a declaration between + a name and a C++ entity. */ +struct GTY(()) cxx_binding { + /* Link to chain together various bindings for this name. */ + cxx_binding *previous; + /* The non-type entity this name is bound to. */ + tree value; + /* The type entity this name is bound to. */ + tree type; + /* The scope at which this binding was made. */ + cp_binding_level *scope; + + bool value_is_inherited : 1; + bool is_local : 1; + bool type_is_hidden : 1; +}; + +/* Datatype used to temporarily save C++ bindings (for implicit + instantiations purposes and like). Implemented in decl.cc. */ +struct GTY(()) cxx_saved_binding { + /* The name of the current binding. */ + tree identifier; + /* The binding we're saving. */ + cxx_binding *binding; + tree real_type_value; +}; + +/* To support lazy module loading, we squirrel away a section number + (and a couple of flags) in the binding slot of unloaded bindings. + We rely on pointers being aligned and setting the bottom bit to + mark a lazy value. GTY doesn't like an array of union, so we have + a containing struct. 
*/ + +struct GTY(()) binding_slot { + union GTY((desc ("%1.is_lazy ()"))) binding_slot_lazy { + tree GTY((tag ("false"))) binding; + } u; + + operator tree & () + { + gcc_checking_assert (!is_lazy ()); + return u.binding; + } + binding_slot &operator= (tree t) + { + u.binding = t; + return *this; + } + bool is_lazy () const + { + return bool (uintptr_t (u.binding) & 1); + } + void set_lazy (unsigned snum) + { + gcc_checking_assert (!u.binding); + u.binding = tree (uintptr_t ((snum << 1) | 1)); + } + void or_lazy (unsigned snum) + { + gcc_checking_assert (is_lazy ()); + u.binding = tree (uintptr_t (u.binding) | (snum << 1)); + } + unsigned get_lazy () const + { + gcc_checking_assert (is_lazy ()); + return unsigned (uintptr_t (u.binding) >> 1); + } +}; + +/* Bindings for modules are held in a sparse array. There is always a + current TU slot, others are allocated as needed. By construction + of the importing mechanism we only ever need to append to the + array. Rather than have straight index/slot tuples, we bunch them + up for greater packing. + + The cluster representation packs well on a 64-bit system. */ + +#define BINDING_VECTOR_SLOTS_PER_CLUSTER 2 +struct binding_index { + unsigned short base; + unsigned short span; +}; + +struct GTY(()) binding_cluster +{ + binding_index GTY((skip)) indices[BINDING_VECTOR_SLOTS_PER_CLUSTER]; + binding_slot slots[BINDING_VECTOR_SLOTS_PER_CLUSTER]; +}; + +/* These two fields overlay lang flags. So don't use those. 
*/ +#define BINDING_VECTOR_ALLOC_CLUSTERS(NODE) \ + (BINDING_VECTOR_CHECK (NODE)->base.u.dependence_info.clique) +#define BINDING_VECTOR_NUM_CLUSTERS(NODE) \ + (BINDING_VECTOR_CHECK (NODE)->base.u.dependence_info.base) +#define BINDING_VECTOR_CLUSTER_BASE(NODE) \ + (((tree_binding_vec *)BINDING_VECTOR_CHECK (NODE))->vec) +#define BINDING_VECTOR_CLUSTER_LAST(NODE) \ + (&BINDING_VECTOR_CLUSTER (NODE, BINDING_VECTOR_NUM_CLUSTERS (NODE) - 1)) +#define BINDING_VECTOR_CLUSTER(NODE,IX) \ + (((tree_binding_vec *)BINDING_VECTOR_CHECK (NODE))->vec[IX]) + +struct GTY(()) tree_binding_vec { + struct tree_base base; + tree name; + binding_cluster GTY((length ("%h.base.u.dependence_info.base"))) vec[1]; +}; + +/* The name of a module vector. */ +#define BINDING_VECTOR_NAME(NODE) \ + (((tree_binding_vec *)BINDING_VECTOR_CHECK (NODE))->name) + +/* tree_binding_vec does uses base.u.dependence_info.base field for + length. It does not have lang_flag etc available! */ + +/* These two flags note if a module-vector contains deduplicated + bindings (i.e. multiple declarations in different imports). */ +/* This binding contains duplicate references to a global module + entity. */ +#define BINDING_VECTOR_GLOBAL_DUPS_P(NODE) \ + (BINDING_VECTOR_CHECK (NODE)->base.static_flag) +/* This binding contains duplicate references to a partioned module + entity. */ +#define BINDING_VECTOR_PARTITION_DUPS_P(NODE) \ + (BINDING_VECTOR_CHECK (NODE)->base.volatile_flag) + +/* These two flags indicate the provenence of the bindings on this + particular vector slot. We can of course determine this from slot + number, but that's a relatively expensive lookup. This avoids + that when iterating. */ +/* This slot is part of the global module (a header unit). */ +#define MODULE_BINDING_GLOBAL_P(NODE) \ + (OVERLOAD_CHECK (NODE)->base.static_flag) +/* This slot is part of the current module (a partition or primary). 
*/ +#define MODULE_BINDING_PARTITION_P(NODE) \ + (OVERLOAD_CHECK (NODE)->base.volatile_flag) + +extern void set_identifier_type_value (tree, tree); +extern void push_binding (tree, tree, cp_binding_level*); +extern void pop_local_binding (tree, tree); +extern void pop_bindings_and_leave_scope (void); +extern tree constructor_name (tree); +extern bool constructor_name_p (tree, tree); + +/* The kinds of scopes we recognize. */ +enum scope_kind { + sk_block = 0, /* An ordinary block scope. This enumerator must + have the value zero because "cp_binding_level" + is initialized by using "memset" to set the + contents to zero, and the default scope kind + is "sk_block". */ + sk_cleanup, /* A scope for (pseudo-)scope for cleanup. It is + pseudo in that it is transparent to name lookup + activities. */ + sk_try, /* A try-block. */ + sk_catch, /* A catch-block. */ + sk_for, /* The scope of the variable declared in a + init-statement. */ + sk_cond, /* The scope of the variable declared in the condition + of an if or switch statement. */ + sk_function_parms, /* The scope containing function parameters. */ + sk_class, /* The scope containing the members of a class. */ + sk_scoped_enum, /* The scope containing the enumerators of a C++11 + scoped enumeration. */ + sk_namespace, /* The scope containing the members of a + namespace, including the global scope. */ + sk_template_parms, /* A scope for template parameters. */ + sk_template_spec, /* Like sk_template_parms, but for an explicit + specialization. Since, by definition, an + explicit specialization is introduced by + "template <>", this scope is always empty. */ + sk_transaction, /* A synchronized or atomic statement. */ + sk_omp /* An OpenMP structured block. */ +}; + +struct GTY(()) cp_class_binding { + cxx_binding *base; + /* The bound name. */ + tree identifier; +}; + +/* For each binding contour we allocate a binding_level structure + which records the names defined in that contour. 
+ Contours include: + 0) the global one + 1) one for each function definition, + where internal declarations of the parameters appear. + 2) one for each compound statement, + to record its declarations. + + The current meaning of a name can be found by searching the levels + from the current one out to the global one. + + Off to the side, may be the class_binding_level. This exists only + to catch class-local declarations. It is otherwise nonexistent. + + Also there may be binding levels that catch cleanups that must be + run when exceptions occur. Thus, to see whether a name is bound in + the current scope, it is not enough to look in the + CURRENT_BINDING_LEVEL. You should use lookup_name_current_level + instead. */ + +struct GTY(()) cp_binding_level { + /* A chain of _DECL nodes for all variables, constants, functions, + and typedef types. These are in the reverse of the order + supplied. There may be OVERLOADs on this list, too, but they + are wrapped in TREE_LISTs; the TREE_VALUE is the OVERLOAD. */ + tree names; + + /* Using directives. */ + vec *using_directives; + + /* For the binding level corresponding to a class, the entities + declared in the class or its base classes. */ + vec *class_shadowed; + + /* Similar to class_shadowed, but for IDENTIFIER_TYPE_VALUE, and + is used for all binding levels. The TREE_PURPOSE is the name of + the entity, the TREE_TYPE is the associated type. In addition + the TREE_VALUE is the IDENTIFIER_TYPE_VALUE before we entered + the class. */ + tree type_shadowed; + + /* For each level (except not the global one), + a chain of BLOCK nodes for all the levels + that were entered and exited one level down. */ + tree blocks; + + /* The entity (namespace, class, function) the scope of which this + binding contour corresponds to. Otherwise NULL. */ + tree this_entity; + + /* The binding level which this one is contained in (inherits from). 
*/ + cp_binding_level *level_chain; + + /* STATEMENT_LIST for statements in this binding contour. + Only used at present for SK_CLEANUP temporary bindings. */ + tree statement_list; + + /* Binding depth at which this level began. */ + int binding_depth; + + /* The kind of scope that this object represents. However, a + SK_TEMPLATE_SPEC scope is represented with KIND set to + SK_TEMPLATE_PARMS and EXPLICIT_SPEC_P set to true. */ + ENUM_BITFIELD (scope_kind) kind : 4; + + /* True if this scope is an SK_TEMPLATE_SPEC scope. This field is + only valid if KIND == SK_TEMPLATE_PARMS. */ + BOOL_BITFIELD explicit_spec_p : 1; + + /* true means make a BLOCK for this level regardless of all else. */ + unsigned keep : 1; + + /* Nonzero if this level can safely have additional + cleanup-needing variables added to it. */ + unsigned more_cleanups_ok : 1; + unsigned have_cleanups : 1; + + /* Transient state set if this scope is of sk_class kind + and is in the process of defining 'this_entity'. Reset + on leaving the class definition to allow for the scope + to be subsequently re-used as a non-defining scope for + 'this_entity'. */ + unsigned defining_class_p : 1; + + /* true for SK_FUNCTION_PARMS of immediate functions. */ + unsigned immediate_fn_ctx_p : 1; + + /* True for SK_FUNCTION_PARMS of a requires-expression. */ + unsigned requires_expression: 1; + + /* 21 bits left to fill a 32-bit word. */ +}; + +/* The binding level currently in effect. */ + +#define current_binding_level \ + (*(cfun && cp_function_chain && cp_function_chain->bindings \ + ? &cp_function_chain->bindings \ + : &scope_chain->bindings)) + +/* The binding level of the current class, if any. */ + +#define class_binding_level scope_chain->class_bindings + +/* True if SCOPE designates the global scope binding contour. 
*/ +#define global_scope_p(SCOPE) \ + ((SCOPE) == NAMESPACE_LEVEL (global_namespace)) + +extern cp_binding_level *leave_scope (void); +extern bool kept_level_p (void); +extern bool global_bindings_p (void); +extern bool toplevel_bindings_p (void); +extern bool namespace_bindings_p (void); +extern bool local_bindings_p (void); +extern bool template_parm_scope_p (void); +extern scope_kind innermost_scope_kind (void); +extern cp_binding_level *begin_scope (scope_kind, tree); +extern void print_binding_stack (void); +extern void pop_everything (void); +extern void keep_next_level (bool); +extern bool is_ancestor (tree ancestor, tree descendant); +extern bool is_nested_namespace (tree parent, tree descendant, + bool inline_only = false); +extern tree push_scope (tree); +extern void pop_scope (tree); +extern tree push_inner_scope (tree); +extern void pop_inner_scope (tree, tree); +extern void push_binding_level (cp_binding_level *); + +extern bool handle_namespace_attrs (tree, tree); +extern void pushlevel_class (void); +extern void poplevel_class (void); + +/* What kind of scopes name lookup looks in. An enum class so we + don't accidentally mix integers. */ +enum class LOOK_where +{ + BLOCK = 1 << 0, /* Consider block scopes. */ + CLASS = 1 << 1, /* Consider class scopes. */ + NAMESPACE = 1 << 2, /* Consider namespace scopes. */ + + ALL = BLOCK | CLASS | NAMESPACE, + BLOCK_NAMESPACE = BLOCK | NAMESPACE, + CLASS_NAMESPACE = CLASS | NAMESPACE, +}; +constexpr LOOK_where operator| (LOOK_where a, LOOK_where b) +{ + return LOOK_where (unsigned (a) | unsigned (b)); +} +constexpr LOOK_where operator& (LOOK_where a, LOOK_where b) +{ + return LOOK_where (unsigned (a) & unsigned (b)); +} + +enum class LOOK_want +{ + NORMAL = 0, /* Normal lookup -- non-types can hide implicit types. */ + TYPE = 1 << 1, /* We only want TYPE_DECLS. */ + NAMESPACE = 1 << 2, /* We only want NAMESPACE_DECLS. */ + + HIDDEN_FRIEND = 1 << 3, /* See hidden friends. 
*/ + HIDDEN_LAMBDA = 1 << 4, /* See lambda-ignored entities. */ + + TYPE_NAMESPACE = TYPE | NAMESPACE, /* Either NAMESPACE or TYPE. */ +}; +constexpr LOOK_want operator| (LOOK_want a, LOOK_want b) +{ + return LOOK_want (unsigned (a) | unsigned (b)); +} +constexpr LOOK_want operator& (LOOK_want a, LOOK_want b) +{ + return LOOK_want (unsigned (a) & unsigned (b)); +} + +extern tree lookup_name (tree, LOOK_where, LOOK_want = LOOK_want::NORMAL); +/* Also declared in c-family/c-common.h. */ +extern tree lookup_name (tree name); +inline tree lookup_name (tree name, LOOK_want want) +{ + return lookup_name (name, LOOK_where::ALL, want); +} + +enum class TAG_how +{ + CURRENT_ONLY = 0, // Look and insert only in current scope + + GLOBAL = 1, // Unqualified lookup, innermost-non-class insertion + + INNERMOST_NON_CLASS = 2, // Look and insert only into + // innermost-non-class + + HIDDEN_FRIEND = 3, // As INNERMOST_NON_CLASS, but hide it +}; + +extern tree lookup_elaborated_type (tree, TAG_how); +extern tree get_namespace_binding (tree ns, tree id); +extern void set_global_binding (tree decl); +inline tree get_global_binding (tree id) +{ + return get_namespace_binding (NULL_TREE, id); +} +extern tree lookup_qualified_name (tree scope, tree name, + LOOK_want = LOOK_want::NORMAL, + bool = true); +extern tree lookup_qualified_name (tree scope, const char *name, + LOOK_want = LOOK_want::NORMAL, + bool = true); +extern bool pushdecl_class_level (tree); +extern tree pushdecl_namespace_level (tree, bool hiding = false); +extern bool push_class_level_binding (tree, tree); +extern tree get_local_decls (); +extern int function_parm_depth (void); +extern tree cp_namespace_decls (tree); +extern void set_decl_namespace (tree, tree, bool); +extern void push_decl_namespace (tree); +extern void pop_decl_namespace (void); +extern void do_namespace_alias (tree, tree); +extern tree do_class_using_decl (tree, tree); +extern tree lookup_arg_dependent (tree, tree, vec *); +extern tree 
search_anon_aggr (tree, tree, bool = false); +extern tree get_class_binding_direct (tree, tree, bool want_type = false); +extern tree get_class_binding (tree, tree, bool want_type = false); +extern tree *find_member_slot (tree klass, tree name); +extern tree *add_member_slot (tree klass, tree name); +extern void resort_type_member_vec (void *, void *, + gt_pointer_operator, void *); +extern vec *set_class_bindings (tree, int extra = 0); +extern void insert_late_enum_def_bindings (tree, tree); +extern tree innermost_non_namespace_value (tree); +extern cxx_binding *outer_binding (tree, cxx_binding *, bool); +extern void cp_emit_debug_info_for_using (tree, tree); + +extern void finish_nonmember_using_decl (tree scope, tree name); +extern void finish_using_directive (tree target, tree attribs); +void push_local_extern_decl_alias (tree decl); +extern tree pushdecl (tree, bool hiding = false); +extern tree pushdecl_outermost_localscope (tree); +extern tree pushdecl_top_level (tree); +extern tree pushdecl_top_level_and_finish (tree, tree); +extern tree pushtag (tree, tree, TAG_how = TAG_how::CURRENT_ONLY); +extern int push_namespace (tree, bool make_inline = false); +extern void pop_namespace (void); +extern void push_nested_namespace (tree); +extern void pop_nested_namespace (tree); +extern void push_to_top_level (void); +extern void pop_from_top_level (void); +extern void push_using_decl_bindings (tree, tree); + +/* Lower level interface for modules. 
*/ +extern tree *mergeable_namespace_slots (tree ns, tree name, bool is_global, + tree *mvec); +extern void add_mergeable_namespace_entity (tree *slot, tree decl); +extern tree lookup_class_binding (tree ctx, tree name); +extern bool import_module_binding (tree ctx, tree name, unsigned mod, + unsigned snum); +extern bool set_module_binding (tree ctx, tree name, unsigned mod, + int mod_glob_flag, + tree value, tree type, tree visible); +extern void add_module_namespace_decl (tree ns, tree decl); + +enum WMB_Flags +{ + WMB_None = 0, + WMB_Dups = 1 << 0, + WMB_Export = 1 << 1, + WMB_Using = 1 << 2, + WMB_Hidden = 1 << 3, +}; + +extern unsigned walk_module_binding (tree binding, bitmap partitions, + bool (*)(tree decl, WMB_Flags, void *data), + void *data); +extern tree add_imported_namespace (tree ctx, tree name, location_t, + unsigned module, + bool inline_p, bool visible_p); +extern const char *get_cxx_dialect_name (enum cxx_dialect dialect); + +#endif /* GCC_CP_NAME_LOOKUP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/operators.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/operators.def new file mode 100644 index 0000000..27b8559 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/operators.def @@ -0,0 +1,163 @@ +/* -*-C-*- + + This file contains definitions of the various C++ operators, + including both overloadable operators (like `+') and + non-overloadable operators (like the `?:' ternary operator). + Written by Mark Mitchell + + Copyright (C) 2000-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* The DEF_OPERATOR macro takes the following arguments: + + NAME + + The name of the operator, as a C string, but without the + preceding `operator'. This is the name that would be given in + the source program. For `operator +', for example, this would be + `+'. + + CODE + + The tree_code for this operator. For `operator +', for example, + this would be PLUS_EXPR. Because there are no tree codes for + assignment operators, the same tree-codes are reused; i.e., + `operator +' will also have PLUS_EXPR as its CODE. + + MANGLING + + The mangling prefix for the operator, as a C string, and as + mangled under the new ABI. For `operator +', for example, this + would be "pl". + + FLAGS + + ovl_op_flags bits. Postincrement and postdecrement operators are + marked as binary. + + Before including this file, you should define DEF_OPERATOR + to take these arguments. + + There is code (such as in grok_op_properties) that depends on the + order the operators are presented in this file. Unary_ops must + preceed a matching binary op (i.e. '+'). Assignment operators must + be last, after OPERATOR_TRANSITION. */ + +/* Use DEF_ASSN_OPERATOR to define an assignment operator. Its + arguments are as for DEF_OPERATOR, but there is no need to provide + FLAGS (OVL_OP_FLAG_BINARY). */ + +#ifndef DEF_ASSN_OPERATOR +#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) \ + DEF_OPERATOR(NAME, CODE, MANGLING, OVL_OP_FLAG_BINARY) +#endif + +/* Memory allocation operators. ARITY has special meaning. 
*/ +DEF_OPERATOR ("new", NEW_EXPR, "nw", OVL_OP_FLAG_ALLOC) +DEF_OPERATOR ("new []", VEC_NEW_EXPR, "na", + OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_VEC) +DEF_OPERATOR ("delete", DELETE_EXPR, "dl", + OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE) +DEF_OPERATOR ("delete []", VEC_DELETE_EXPR, "da", + OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE | OVL_OP_FLAG_VEC) + +/* Unary operators. */ +DEF_OPERATOR ("+", UNARY_PLUS_EXPR, "ps", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("-", NEGATE_EXPR, "ng", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("&", ADDR_EXPR, "ad", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("*", INDIRECT_REF, "de", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("~", BIT_NOT_EXPR, "co", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("!", TRUTH_NOT_EXPR, "nt", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("++", PREINCREMENT_EXPR, "pp", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("--", PREDECREMENT_EXPR, "mm", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("->", COMPONENT_REF, "pt", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("sizeof", SIZEOF_EXPR, "sz", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("co_await", CO_AWAIT_EXPR, "aw", OVL_OP_FLAG_UNARY) + +/* These are extensions. */ +DEF_OPERATOR ("alignof", ALIGNOF_EXPR, "az", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("__imag__", IMAGPART_EXPR, "v18__imag__", OVL_OP_FLAG_UNARY) +DEF_OPERATOR ("__real__", REALPART_EXPR, "v18__real__", OVL_OP_FLAG_UNARY) + +/* Binary operators. */ +DEF_OPERATOR ("+", PLUS_EXPR, "pl", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("-", MINUS_EXPR, "mi", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("*", MULT_EXPR, "ml", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("/", TRUNC_DIV_EXPR, "dv", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("%", TRUNC_MOD_EXPR, "rm", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("&", BIT_AND_EXPR, "an", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("|", BIT_IOR_EXPR, "or", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("^", BIT_XOR_EXPR, "eo", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("<<", LSHIFT_EXPR, "ls", OVL_OP_FLAG_BINARY) +DEF_OPERATOR (">>", RSHIFT_EXPR, "rs", OVL_OP_FLAG_BINARY) + +/* defaultable_fn_check relies on the ordering of the comparison operators. 
*/ +DEF_OPERATOR ("==", EQ_EXPR, "eq", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("!=", NE_EXPR, "ne", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("<", LT_EXPR, "lt", OVL_OP_FLAG_BINARY) +DEF_OPERATOR (">", GT_EXPR, "gt", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("<=", LE_EXPR, "le", OVL_OP_FLAG_BINARY) +DEF_OPERATOR (">=", GE_EXPR, "ge", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("<=>", SPACESHIP_EXPR, "ss", OVL_OP_FLAG_BINARY) + +DEF_OPERATOR ("&&", TRUTH_ANDIF_EXPR, "aa", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("||", TRUTH_ORIF_EXPR, "oo", OVL_OP_FLAG_BINARY) +DEF_OPERATOR (",", COMPOUND_EXPR, "cm", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("->*", MEMBER_REF, "pm", OVL_OP_FLAG_BINARY) +DEF_OPERATOR (".*", DOTSTAR_EXPR, "ds", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("[]", ARRAY_REF, "ix", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("++", POSTINCREMENT_EXPR, "pp", OVL_OP_FLAG_BINARY) +DEF_OPERATOR ("--", POSTDECREMENT_EXPR, "mm", OVL_OP_FLAG_BINARY) + +/* Miscellaneous. */ +DEF_OPERATOR ("?:", COND_EXPR, "qu", OVL_OP_FLAG_NONE) +DEF_OPERATOR ("()", CALL_EXPR, "cl", OVL_OP_FLAG_NONE) + +/* Operators needed for mangling. */ +DEF_OPERATOR (NULL, CAST_EXPR, "cv", OVL_OP_FLAG_UNARY) +DEF_OPERATOR (NULL, DYNAMIC_CAST_EXPR, "dc", OVL_OP_FLAG_UNARY) +DEF_OPERATOR (NULL, REINTERPRET_CAST_EXPR, "rc", OVL_OP_FLAG_UNARY) +DEF_OPERATOR (NULL, CONST_CAST_EXPR, "cc", OVL_OP_FLAG_UNARY) +DEF_OPERATOR (NULL, STATIC_CAST_EXPR, "sc", OVL_OP_FLAG_UNARY) +DEF_OPERATOR (NULL, SCOPE_REF, "sr", OVL_OP_FLAG_NONE) +DEF_OPERATOR (NULL, EXPR_PACK_EXPANSION, "sp", OVL_OP_FLAG_NONE) +DEF_OPERATOR (NULL, UNARY_LEFT_FOLD_EXPR, "fl", OVL_OP_FLAG_NONE) +DEF_OPERATOR (NULL, UNARY_RIGHT_FOLD_EXPR, "fr", OVL_OP_FLAG_NONE) +DEF_OPERATOR (NULL, BINARY_LEFT_FOLD_EXPR, "fL", OVL_OP_FLAG_NONE) +DEF_OPERATOR (NULL, BINARY_RIGHT_FOLD_EXPR, "fR", OVL_OP_FLAG_NONE) + +#ifdef OPERATOR_TRANSITION +OPERATOR_TRANSITION +#undef OPERATOR_TRANSITION +#endif + +/* Assignment operators. 
*/ +DEF_ASSN_OPERATOR ("=", NOP_EXPR, "aS") +DEF_ASSN_OPERATOR ("+=", PLUS_EXPR, "pL") +DEF_ASSN_OPERATOR ("-=", MINUS_EXPR, "mI") +DEF_ASSN_OPERATOR ("*=", MULT_EXPR, "mL") +DEF_ASSN_OPERATOR ("/=", TRUNC_DIV_EXPR, "dV") +DEF_ASSN_OPERATOR ("%=", TRUNC_MOD_EXPR, "rM") +DEF_ASSN_OPERATOR ("&=", BIT_AND_EXPR, "aN") +DEF_ASSN_OPERATOR ("|=", BIT_IOR_EXPR, "oR") +DEF_ASSN_OPERATOR ("^=", BIT_XOR_EXPR, "eO") +DEF_ASSN_OPERATOR ("<<=", LSHIFT_EXPR, "lS") +DEF_ASSN_OPERATOR (">>=", RSHIFT_EXPR, "rS") + +#undef DEF_ASSN_OPERATOR +#undef DEF_OPERATOR diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/type-utils.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/type-utils.h new file mode 100644 index 0000000..749fe83 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cp/type-utils.h @@ -0,0 +1,54 @@ +/* Utilities for querying and manipulating type trees. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CP_TYPE_UTILS_H +#define GCC_CP_TYPE_UTILS_H + +/* Returns the first tree within T that is directly matched by PRED. T may be a + type or PARM_DECL and is incrementally decomposed toward its type-specifier + until a match is found. NULL is returned if PRED does not match any + part of T. + + This is primarily intended for detecting whether T uses `auto' or a concept + identifier. 
Since either of these can only appear as a type-specifier for + the declaration in question, only top-level qualifications are traversed; + find_type_usage does not look through the whole type. */ + +inline tree +find_type_usage (tree t, bool (*pred) (const_tree)) +{ + if (pred (t)) + return t; + + enum tree_code code = TREE_CODE (t); + + if (code == POINTER_TYPE || code == REFERENCE_TYPE + || code == PARM_DECL || code == OFFSET_TYPE + || code == FUNCTION_TYPE || code == METHOD_TYPE + || code == ARRAY_TYPE) + return find_type_usage (TREE_TYPE (t), pred); + + if (TYPE_PTRMEMFUNC_P (t)) + return find_type_usage + (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (t)), pred); + + return NULL_TREE; +} + +#endif // GCC_CP_TYPE_UTILS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cppbuiltin.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cppbuiltin.h new file mode 100644 index 0000000..e84bcc6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cppbuiltin.h @@ -0,0 +1,33 @@ +/* Define builtin-in macros for all front ends that perform preprocessing + Copyright (C) 2010-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CPPBUILTIN_H +#define GCC_CPPBUILTIN_H + +/* Parse a BASEVER version string of the format "major.minor.patchlevel" + or "major.minor" to extract its components. 
*/ +extern void parse_basever (int *, int *, int *); + +/* Define macros builtins common to all language performing CPP + preprocessing. */ +extern void define_language_independent_builtin_macros (cpp_reader *); + + +#endif /* ! GCC_CPPBUILTIN_H */ + diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cppdefault.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cppdefault.h new file mode 100644 index 0000000..fb97c0b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cppdefault.h @@ -0,0 +1,76 @@ +/* CPP Library. + Copyright (C) 1986-2022 Free Software Foundation, Inc. + Contributed by Per Bothner, 1994-95. + Based on CCCP program by Paul Rubin, June 1986 + Adapted to ANSI C, Richard Stallman, Jan 1987 + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING3. If not see + . */ + +#ifndef GCC_CPPDEFAULT_H +#define GCC_CPPDEFAULT_H + +/* This is the default list of directories to search for include files. + It may be overridden by the various -I and -ixxx options. + + #include "file" looks in the same directory as the current file, + then this list. + #include just looks in this list. + + All these directories are treated as `system' include directories + (they are not subject to pedantic warnings in some cases). */ + +struct default_include +{ + const char *const fname; /* The name of the directory. 
*/ + const char *const component; /* The component containing the directory + (see update_path in prefix.cc) */ + const char cplusplus; /* When this is non-zero, we should only + consider this if we're compiling C++. + When the -stdlib option is configured, this + may take values greater than 1 to indicate + which C++ standard library should be + used. */ + const char cxx_aware; /* Includes in this directory don't need to + be wrapped in extern "C" when compiling + C++. */ + const char add_sysroot; /* FNAME should be prefixed by + cpp_SYSROOT. */ + const char multilib; /* FNAME should have appended + - the multilib path specified with -imultilib + when set to 1, + - the multiarch path specified with + -imultiarch, when set to 2. */ +}; + +extern const struct default_include cpp_include_defaults[]; +extern const char cpp_GCC_INCLUDE_DIR[]; +extern const size_t cpp_GCC_INCLUDE_DIR_len; + +/* The configure-time prefix, i.e., the value supplied as the argument + to --prefix=. */ +extern const char cpp_PREFIX[]; +/* The length of the configure-time prefix. */ +extern const size_t cpp_PREFIX_len; +/* The configure-time execution prefix. This is typically the lib/gcc + subdirectory of cpp_PREFIX. */ +extern const char cpp_EXEC_PREFIX[]; +/* The run-time execution prefix. This is typically the lib/gcc + subdirectory of the actual installation. */ +extern const char *gcc_exec_prefix; + +/* Return true if the toolchain is relocated. */ +bool cpp_relocated (void); + +#endif /* ! GCC_CPPDEFAULT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cpplib.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cpplib.h new file mode 100644 index 0000000..3eba6f7 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cpplib.h @@ -0,0 +1,1568 @@ +/* Definitions for CPP library. + Copyright (C) 1995-2022 Free Software Foundation, Inc. + Written by Per Bothner, 1994-95. 
+ +This program is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING3. If not see +. + + In other words, you are welcome to use, share and improve this program. + You are forbidden to forbid anyone else to use, share and improve + what you give them. Help stamp out software-hoarding! */ +#ifndef LIBCPP_CPPLIB_H +#define LIBCPP_CPPLIB_H + +#include +#include "symtab.h" +#include "line-map.h" + +typedef struct cpp_reader cpp_reader; +typedef struct cpp_buffer cpp_buffer; +typedef struct cpp_options cpp_options; +typedef struct cpp_token cpp_token; +typedef struct cpp_string cpp_string; +typedef struct cpp_hashnode cpp_hashnode; +typedef struct cpp_macro cpp_macro; +typedef struct cpp_callbacks cpp_callbacks; +typedef struct cpp_dir cpp_dir; + +struct _cpp_file; + +/* The first three groups, apart from '=', can appear in preprocessor + expressions (+= and -= are used to indicate unary + and - resp.). + This allows a lookup table to be implemented in _cpp_parse_expr. + + The first group, to CPP_LAST_EQ, can be immediately followed by an + '='. The lexer needs operators ending in '=', like ">>=", to be in + the same order as their counterparts without the '=', like ">>". + + See the cpp_operator table optab in expr.cc if you change the order or + add or remove anything in the first group. 
*/ + +#define TTYPE_TABLE \ + OP(EQ, "=") \ + OP(NOT, "!") \ + OP(GREATER, ">") /* compare */ \ + OP(LESS, "<") \ + OP(PLUS, "+") /* math */ \ + OP(MINUS, "-") \ + OP(MULT, "*") \ + OP(DIV, "/") \ + OP(MOD, "%") \ + OP(AND, "&") /* bit ops */ \ + OP(OR, "|") \ + OP(XOR, "^") \ + OP(RSHIFT, ">>") \ + OP(LSHIFT, "<<") \ + \ + OP(COMPL, "~") \ + OP(AND_AND, "&&") /* logical */ \ + OP(OR_OR, "||") \ + OP(QUERY, "?") \ + OP(COLON, ":") \ + OP(COMMA, ",") /* grouping */ \ + OP(OPEN_PAREN, "(") \ + OP(CLOSE_PAREN, ")") \ + TK(EOF, NONE) \ + OP(EQ_EQ, "==") /* compare */ \ + OP(NOT_EQ, "!=") \ + OP(GREATER_EQ, ">=") \ + OP(LESS_EQ, "<=") \ + OP(SPACESHIP, "<=>") \ + \ + /* These two are unary + / - in preprocessor expressions. */ \ + OP(PLUS_EQ, "+=") /* math */ \ + OP(MINUS_EQ, "-=") \ + \ + OP(MULT_EQ, "*=") \ + OP(DIV_EQ, "/=") \ + OP(MOD_EQ, "%=") \ + OP(AND_EQ, "&=") /* bit ops */ \ + OP(OR_EQ, "|=") \ + OP(XOR_EQ, "^=") \ + OP(RSHIFT_EQ, ">>=") \ + OP(LSHIFT_EQ, "<<=") \ + /* Digraphs together, beginning with CPP_FIRST_DIGRAPH. */ \ + OP(HASH, "#") /* digraphs */ \ + OP(PASTE, "##") \ + OP(OPEN_SQUARE, "[") \ + OP(CLOSE_SQUARE, "]") \ + OP(OPEN_BRACE, "{") \ + OP(CLOSE_BRACE, "}") \ + /* The remainder of the punctuation. Order is not significant. 
*/ \ + OP(SEMICOLON, ";") /* structure */ \ + OP(ELLIPSIS, "...") \ + OP(PLUS_PLUS, "++") /* increment */ \ + OP(MINUS_MINUS, "--") \ + OP(DEREF, "->") /* accessors */ \ + OP(DOT, ".") \ + OP(SCOPE, "::") \ + OP(DEREF_STAR, "->*") \ + OP(DOT_STAR, ".*") \ + OP(ATSIGN, "@") /* used in Objective-C */ \ + \ + TK(NAME, IDENT) /* word */ \ + TK(AT_NAME, IDENT) /* @word - Objective-C */ \ + TK(NUMBER, LITERAL) /* 34_be+ta */ \ + \ + TK(CHAR, LITERAL) /* 'char' */ \ + TK(WCHAR, LITERAL) /* L'char' */ \ + TK(CHAR16, LITERAL) /* u'char' */ \ + TK(CHAR32, LITERAL) /* U'char' */ \ + TK(UTF8CHAR, LITERAL) /* u8'char' */ \ + TK(OTHER, LITERAL) /* stray punctuation */ \ + \ + TK(STRING, LITERAL) /* "string" */ \ + TK(WSTRING, LITERAL) /* L"string" */ \ + TK(STRING16, LITERAL) /* u"string" */ \ + TK(STRING32, LITERAL) /* U"string" */ \ + TK(UTF8STRING, LITERAL) /* u8"string" */ \ + TK(OBJC_STRING, LITERAL) /* @"string" - Objective-C */ \ + TK(HEADER_NAME, LITERAL) /* in #include */ \ + \ + TK(CHAR_USERDEF, LITERAL) /* 'char'_suffix - C++-0x */ \ + TK(WCHAR_USERDEF, LITERAL) /* L'char'_suffix - C++-0x */ \ + TK(CHAR16_USERDEF, LITERAL) /* u'char'_suffix - C++-0x */ \ + TK(CHAR32_USERDEF, LITERAL) /* U'char'_suffix - C++-0x */ \ + TK(UTF8CHAR_USERDEF, LITERAL) /* u8'char'_suffix - C++-0x */ \ + TK(STRING_USERDEF, LITERAL) /* "string"_suffix - C++-0x */ \ + TK(WSTRING_USERDEF, LITERAL) /* L"string"_suffix - C++-0x */ \ + TK(STRING16_USERDEF, LITERAL) /* u"string"_suffix - C++-0x */ \ + TK(STRING32_USERDEF, LITERAL) /* U"string"_suffix - C++-0x */ \ + TK(UTF8STRING_USERDEF,LITERAL) /* u8"string"_suffix - C++-0x */ \ + \ + TK(COMMENT, LITERAL) /* Only if output comments. */ \ + /* SPELL_LITERAL happens to DTRT. */ \ + TK(MACRO_ARG, NONE) /* Macro argument. */ \ + TK(PRAGMA, NONE) /* Only for deferred pragmas. */ \ + TK(PRAGMA_EOL, NONE) /* End-of-line for deferred pragmas. */ \ + TK(PADDING, NONE) /* Whitespace for -E. 
*/ + +#define OP(e, s) CPP_ ## e, +#define TK(e, s) CPP_ ## e, +enum cpp_ttype +{ + TTYPE_TABLE + N_TTYPES, + + /* A token type for keywords, as opposed to ordinary identifiers. */ + CPP_KEYWORD, + + /* Positions in the table. */ + CPP_LAST_EQ = CPP_LSHIFT, + CPP_FIRST_DIGRAPH = CPP_HASH, + CPP_LAST_PUNCTUATOR= CPP_ATSIGN, + CPP_LAST_CPP_OP = CPP_LESS_EQ +}; +#undef OP +#undef TK + +/* C language kind, used when calling cpp_create_reader. */ +enum c_lang {CLK_GNUC89 = 0, CLK_GNUC99, CLK_GNUC11, CLK_GNUC17, CLK_GNUC2X, + CLK_STDC89, CLK_STDC94, CLK_STDC99, CLK_STDC11, CLK_STDC17, + CLK_STDC2X, + CLK_GNUCXX, CLK_CXX98, CLK_GNUCXX11, CLK_CXX11, + CLK_GNUCXX14, CLK_CXX14, CLK_GNUCXX17, CLK_CXX17, + CLK_GNUCXX20, CLK_CXX20, CLK_GNUCXX23, CLK_CXX23, + CLK_ASM}; + +/* Payload of a NUMBER, STRING, CHAR or COMMENT token. */ +struct GTY(()) cpp_string { + unsigned int len; + const unsigned char *text; +}; + +/* Flags for the cpp_token structure. */ +#define PREV_WHITE (1 << 0) /* If whitespace before this token. */ +#define DIGRAPH (1 << 1) /* If it was a digraph. */ +#define STRINGIFY_ARG (1 << 2) /* If macro argument to be stringified. */ +#define PASTE_LEFT (1 << 3) /* If on LHS of a ## operator. */ +#define NAMED_OP (1 << 4) /* C++ named operators. */ +#define PREV_FALLTHROUGH (1 << 5) /* On a token preceeded by FALLTHROUGH + comment. */ +#define BOL (1 << 6) /* Token at beginning of line. */ +#define PURE_ZERO (1 << 7) /* Single 0 digit, used by the C++ frontend, + set in c-lex.cc. */ +#define SP_DIGRAPH (1 << 8) /* # or ## token was a digraph. */ +#define SP_PREV_WHITE (1 << 9) /* If whitespace before a ## + operator, or before this token + after a # operator. */ +#define NO_EXPAND (1 << 10) /* Do not macro-expand this token. */ +#define PRAGMA_OP (1 << 11) /* _Pragma token. */ + +/* Specify which field, if any, of the cpp_token union is used. 
*/ + +enum cpp_token_fld_kind { + CPP_TOKEN_FLD_NODE, + CPP_TOKEN_FLD_SOURCE, + CPP_TOKEN_FLD_STR, + CPP_TOKEN_FLD_ARG_NO, + CPP_TOKEN_FLD_TOKEN_NO, + CPP_TOKEN_FLD_PRAGMA, + CPP_TOKEN_FLD_NONE +}; + +/* A macro argument in the cpp_token union. */ +struct GTY(()) cpp_macro_arg { + /* Argument number. */ + unsigned int arg_no; + /* The original spelling of the macro argument token. */ + cpp_hashnode * + GTY ((nested_ptr (union tree_node, + "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL", + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"))) + spelling; +}; + +/* An identifier in the cpp_token union. */ +struct GTY(()) cpp_identifier { + /* The canonical (UTF-8) spelling of the identifier. */ + cpp_hashnode * + GTY ((nested_ptr (union tree_node, + "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL", + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"))) + node; + /* The original spelling of the identifier. */ + cpp_hashnode * + GTY ((nested_ptr (union tree_node, + "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL", + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"))) + spelling; +}; + +/* A preprocessing token. This has been carefully packed and should + occupy 16 bytes on 32-bit hosts and 24 bytes on 64-bit hosts. */ +struct GTY(()) cpp_token { + + /* Location of first char of token, together with range of full token. */ + location_t src_loc; + + ENUM_BITFIELD(cpp_ttype) type : CHAR_BIT; /* token type */ + unsigned short flags; /* flags - see above */ + + union cpp_token_u + { + /* An identifier. */ + struct cpp_identifier GTY ((tag ("CPP_TOKEN_FLD_NODE"))) node; + + /* Inherit padding from this token. */ + cpp_token * GTY ((tag ("CPP_TOKEN_FLD_SOURCE"))) source; + + /* A string, or number. */ + struct cpp_string GTY ((tag ("CPP_TOKEN_FLD_STR"))) str; + + /* Argument no. (and original spelling) for a CPP_MACRO_ARG. */ + struct cpp_macro_arg GTY ((tag ("CPP_TOKEN_FLD_ARG_NO"))) macro_arg; + + /* Original token no. 
for a CPP_PASTE (from a sequence of + consecutive paste tokens in a macro expansion). */ + unsigned int GTY ((tag ("CPP_TOKEN_FLD_TOKEN_NO"))) token_no; + + /* Caller-supplied identifier for a CPP_PRAGMA. */ + unsigned int GTY ((tag ("CPP_TOKEN_FLD_PRAGMA"))) pragma; + } GTY ((desc ("cpp_token_val_index (&%1)"))) val; +}; + +/* Say which field is in use. */ +extern enum cpp_token_fld_kind cpp_token_val_index (const cpp_token *tok); + +/* A type wide enough to hold any multibyte source character. + cpplib's character constant interpreter requires an unsigned type. + Also, a typedef for the signed equivalent. + The width of this type is capped at 32 bits; there do exist targets + where wchar_t is 64 bits, but only in a non-default mode, and there + would be no meaningful interpretation for a wchar_t value greater + than 2^32 anyway -- the widest wide-character encoding around is + ISO 10646, which stops at 2^31. */ +#if CHAR_BIT * SIZEOF_INT >= 32 +# define CPPCHAR_SIGNED_T int +#elif CHAR_BIT * SIZEOF_LONG >= 32 +# define CPPCHAR_SIGNED_T long +#else +# error "Cannot find a least-32-bit signed integer type" +#endif +typedef unsigned CPPCHAR_SIGNED_T cppchar_t; +typedef CPPCHAR_SIGNED_T cppchar_signed_t; + +/* Style of header dependencies to generate. */ +enum cpp_deps_style { DEPS_NONE = 0, DEPS_USER, DEPS_SYSTEM }; + +/* The possible normalization levels, from most restrictive to least. */ +enum cpp_normalize_level { + /* In NFKC. */ + normalized_KC = 0, + /* In NFC. */ + normalized_C, + /* In NFC, except for subsequences where being in NFC would make + the identifier invalid. */ + normalized_identifier_C, + /* Not normalized at all. */ + normalized_none +}; + +enum cpp_main_search +{ + CMS_none, /* A regular source file. */ + CMS_header, /* Is a directly-specified header file (eg PCH or + header-unit). */ + CMS_user, /* Search the user INCLUDE path. */ + CMS_system, /* Search the system INCLUDE path. 
*/ +}; + +/* The possible bidirectional control characters checking levels. */ +enum cpp_bidirectional_level { + /* No checking. */ + bidirectional_none = 0, + /* Only detect unpaired uses of bidirectional control characters. */ + bidirectional_unpaired = 1, + /* Detect any use of bidirectional control characters. */ + bidirectional_any = 2, + /* Also warn about UCNs. */ + bidirectional_ucn = 4 +}; + +/* This structure is nested inside struct cpp_reader, and + carries all the options visible to the command line. */ +struct cpp_options +{ + /* The language we're preprocessing. */ + enum c_lang lang; + + /* Nonzero means use extra default include directories for C++. */ + unsigned char cplusplus; + + /* Nonzero means handle cplusplus style comments. */ + unsigned char cplusplus_comments; + + /* Nonzero means define __OBJC__, treat @ as a special token, use + the OBJC[PLUS]_INCLUDE_PATH environment variable, and allow + "#import". */ + unsigned char objc; + + /* Nonzero means don't copy comments into the output file. */ + unsigned char discard_comments; + + /* Nonzero means don't copy comments into the output file during + macro expansion. */ + unsigned char discard_comments_in_macro_exp; + + /* Nonzero means process the ISO trigraph sequences. */ + unsigned char trigraphs; + + /* Nonzero means process the ISO digraph sequences. */ + unsigned char digraphs; + + /* Nonzero means to allow hexadecimal floats and LL suffixes. */ + unsigned char extended_numbers; + + /* Nonzero means process u/U prefix literals (UTF-16/32). */ + unsigned char uliterals; + + /* Nonzero means process u8 prefixed character literals (UTF-8). */ + unsigned char utf8_char_literals; + + /* Nonzero means process r/R raw strings. If this is set, uliterals + must be set as well. */ + unsigned char rliterals; + + /* Nonzero means print names of header files (-H). */ + unsigned char print_include_names; + + /* Nonzero means complain about deprecated features. 
*/ + unsigned char cpp_warn_deprecated; + + /* Nonzero means warn if slash-star appears in a comment. */ + unsigned char warn_comments; + + /* Nonzero means to warn about __DATA__, __TIME__ and __TIMESTAMP__ usage. */ + unsigned char warn_date_time; + + /* Nonzero means warn if a user-supplied include directory does not + exist. */ + unsigned char warn_missing_include_dirs; + + /* Nonzero means warn if there are any trigraphs. */ + unsigned char warn_trigraphs; + + /* Nonzero means warn about multicharacter charconsts. */ + unsigned char warn_multichar; + + /* Nonzero means warn about various incompatibilities with + traditional C. */ + unsigned char cpp_warn_traditional; + + /* Nonzero means warn about long long numeric constants. */ + unsigned char cpp_warn_long_long; + + /* Nonzero means warn about text after an #endif (or #else). */ + unsigned char warn_endif_labels; + + /* Nonzero means warn about implicit sign changes owing to integer + promotions. */ + unsigned char warn_num_sign_change; + + /* Zero means don't warn about __VA_ARGS__ usage in c89 pedantic mode. + Presumably the usage is protected by the appropriate #ifdef. */ + unsigned char warn_variadic_macros; + + /* Nonzero means warn about builtin macros that are redefined or + explicitly undefined. */ + unsigned char warn_builtin_macro_redefined; + + /* Different -Wimplicit-fallthrough= levels. */ + unsigned char cpp_warn_implicit_fallthrough; + + /* Nonzero means we should look for header.gcc files that remap file + names. */ + unsigned char remap; + + /* Zero means dollar signs are punctuation. */ + unsigned char dollars_in_ident; + + /* Nonzero means UCNs are accepted in identifiers. */ + unsigned char extended_identifiers; + + /* True if we should warn about dollars in identifiers or numbers + for this translation unit. */ + unsigned char warn_dollars; + + /* Nonzero means warn if undefined identifiers are evaluated in an #if. 
*/ + unsigned char warn_undef; + + /* Nonzero means warn if "defined" is encountered in a place other than + an #if. */ + unsigned char warn_expansion_to_defined; + + /* Nonzero means warn of unused macros from the main file. */ + unsigned char warn_unused_macros; + + /* Nonzero for the 1999 C Standard, including corrigenda and amendments. */ + unsigned char c99; + + /* Nonzero if we are conforming to a specific C or C++ standard. */ + unsigned char std; + + /* Nonzero means give all the error messages the ANSI standard requires. */ + unsigned char cpp_pedantic; + + /* Nonzero means we're looking at already preprocessed code, so don't + bother trying to do macro expansion and whatnot. */ + unsigned char preprocessed; + + /* Nonzero means we are going to emit debugging logs during + preprocessing. */ + unsigned char debug; + + /* Nonzero means we are tracking locations of tokens involved in + macro expansion. 1 Means we track the location in degraded mode + where we do not track locations of tokens resulting from the + expansion of arguments of function-like macro. 2 Means we do + track all macro expansions. This last option is the one that + consumes the highest amount of memory. */ + unsigned char track_macro_expansion; + + /* Nonzero means handle C++ alternate operator names. */ + unsigned char operator_names; + + /* Nonzero means warn about use of C++ alternate operator names. */ + unsigned char warn_cxx_operator_names; + + /* True for traditional preprocessing. */ + unsigned char traditional; + + /* Nonzero for C++ 2011 Standard user-defined literals. */ + unsigned char user_literals; + + /* Nonzero means warn when a string or character literal is followed by a + ud-suffix which does not beging with an underscore. */ + unsigned char warn_literal_suffix; + + /* Nonzero means interpret imaginary, fixed-point, or other gnu extension + literal number suffixes as user-defined literal number suffixes. 
*/ + unsigned char ext_numeric_literals; + + /* Nonzero means extended identifiers allow the characters specified + in C11. */ + unsigned char c11_identifiers; + + /* Nonzero for C++ 2014 Standard binary constants. */ + unsigned char binary_constants; + + /* Nonzero for C++ 2014 Standard digit separators. */ + unsigned char digit_separators; + + /* Nonzero for C2X decimal floating-point constants. */ + unsigned char dfp_constants; + + /* Nonzero for C++20 __VA_OPT__ feature. */ + unsigned char va_opt; + + /* Nonzero for the '::' token. */ + unsigned char scope; + + /* Nonzero for the '#elifdef' and '#elifndef' directives. */ + unsigned char elifdef; + + /* Nonzero means tokenize C++20 module directives. */ + unsigned char module_directives; + + /* Nonzero for C++23 size_t literals. */ + unsigned char size_t_literals; + + /* Holds the name of the target (execution) character set. */ + const char *narrow_charset; + + /* Holds the name of the target wide character set. */ + const char *wide_charset; + + /* Holds the name of the input character set. */ + const char *input_charset; + + /* The minimum permitted level of normalization before a warning + is generated. See enum cpp_normalize_level. */ + int warn_normalize; + + /* True to warn about precompiled header files we couldn't use. */ + bool warn_invalid_pch; + + /* True if dependencies should be restored from a precompiled header. */ + bool restore_pch_deps; + + /* True if warn about differences between C90 and C99. */ + signed char cpp_warn_c90_c99_compat; + + /* True if warn about differences between C11 and C2X. */ + signed char cpp_warn_c11_c2x_compat; + + /* True if warn about differences between C++98 and C++11. */ + bool cpp_warn_cxx11_compat; + + /* Nonzero if bidirectional control characters checking is on. See enum + cpp_bidirectional_level. */ + unsigned char cpp_warn_bidirectional; + + /* Dependency generation. */ + struct + { + /* Style of header dependencies to generate. 
*/ + enum cpp_deps_style style; + + /* Assume missing files are generated files. */ + bool missing_files; + + /* Generate phony targets for each dependency apart from the first + one. */ + bool phony_targets; + + /* Generate dependency info for modules. */ + bool modules; + + /* If true, no dependency is generated on the main file. */ + bool ignore_main_file; + + /* If true, intend to use the preprocessor output (e.g., for compilation) + in addition to the dependency info. */ + bool need_preprocessor_output; + } deps; + + /* Target-specific features set by the front end or client. */ + + /* Precision for target CPP arithmetic, target characters, target + ints and target wide characters, respectively. */ + size_t precision, char_precision, int_precision, wchar_precision; + + /* True means chars (wide chars) are unsigned. */ + bool unsigned_char, unsigned_wchar; + + /* True if the most significant byte in a word has the lowest + address in memory. */ + bool bytes_big_endian; + + /* Nonzero means __STDC__ should have the value 0 in system headers. */ + unsigned char stdc_0_in_system_headers; + + /* True disables tokenization outside of preprocessing directives. */ + bool directives_only; + + /* True enables canonicalization of system header file paths. */ + bool canonical_system_headers; + + /* The maximum depth of the nested #include. */ + unsigned int max_include_depth; + + cpp_main_search main_search : 8; +}; + +/* Diagnostic levels. To get a diagnostic without associating a + position in the translation unit with it, use cpp_error_with_line + with a line number of zero. */ + +enum cpp_diagnostic_level { + /* Warning, an error with -Werror. */ + CPP_DL_WARNING = 0, + /* Same as CPP_DL_WARNING, except it is not suppressed in system headers. */ + CPP_DL_WARNING_SYSHDR, + /* Warning, an error with -pedantic-errors or -Werror. */ + CPP_DL_PEDWARN, + /* An error. */ + CPP_DL_ERROR, + /* An internal consistency check failed. 
Prints "internal error: ", + otherwise the same as CPP_DL_ERROR. */ + CPP_DL_ICE, + /* An informative note following a warning. */ + CPP_DL_NOTE, + /* A fatal error. */ + CPP_DL_FATAL +}; + +/* Warning reason codes. Use a reason code of CPP_W_NONE for unclassified + warnings and diagnostics that are not warnings. */ + +enum cpp_warning_reason { + CPP_W_NONE = 0, + CPP_W_DEPRECATED, + CPP_W_COMMENTS, + CPP_W_MISSING_INCLUDE_DIRS, + CPP_W_TRIGRAPHS, + CPP_W_MULTICHAR, + CPP_W_TRADITIONAL, + CPP_W_LONG_LONG, + CPP_W_ENDIF_LABELS, + CPP_W_NUM_SIGN_CHANGE, + CPP_W_VARIADIC_MACROS, + CPP_W_BUILTIN_MACRO_REDEFINED, + CPP_W_DOLLARS, + CPP_W_UNDEF, + CPP_W_UNUSED_MACROS, + CPP_W_CXX_OPERATOR_NAMES, + CPP_W_NORMALIZE, + CPP_W_INVALID_PCH, + CPP_W_WARNING_DIRECTIVE, + CPP_W_LITERAL_SUFFIX, + CPP_W_SIZE_T_LITERALS, + CPP_W_DATE_TIME, + CPP_W_PEDANTIC, + CPP_W_C90_C99_COMPAT, + CPP_W_C11_C2X_COMPAT, + CPP_W_CXX11_COMPAT, + CPP_W_EXPANSION_TO_DEFINED, + CPP_W_BIDIRECTIONAL +}; + +/* Callback for header lookup for HEADER, which is the name of a + source file. It is used as a method of last resort to find headers + that are not otherwise found during the normal include processing. + The return value is the malloced name of a header to try and open, + if any, or NULL otherwise. This callback is called only if the + header is otherwise unfound. */ +typedef const char *(*missing_header_cb)(cpp_reader *, const char *header, cpp_dir **); + +/* Call backs to cpplib client. */ +struct cpp_callbacks +{ + /* Called when a new line of preprocessed output is started. */ + void (*line_change) (cpp_reader *, const cpp_token *, int); + + /* Called when switching to/from a new file. + The line_map is for the new file. It is NULL if there is no new file. + (In C this happens when done with + and also + when done with a main file.) This can be used for resource cleanup. 
*/ + void (*file_change) (cpp_reader *, const line_map_ordinary *); + + void (*dir_change) (cpp_reader *, const char *); + void (*include) (cpp_reader *, location_t, const unsigned char *, + const char *, int, const cpp_token **); + void (*define) (cpp_reader *, location_t, cpp_hashnode *); + void (*undef) (cpp_reader *, location_t, cpp_hashnode *); + void (*ident) (cpp_reader *, location_t, const cpp_string *); + void (*def_pragma) (cpp_reader *, location_t); + int (*valid_pch) (cpp_reader *, const char *, int); + void (*read_pch) (cpp_reader *, const char *, int, const char *); + missing_header_cb missing_header; + + /* Context-sensitive macro support. Returns macro (if any) that should + be expanded. */ + cpp_hashnode * (*macro_to_expand) (cpp_reader *, const cpp_token *); + + /* Called to emit a diagnostic. This callback receives the + translated message. */ + bool (*diagnostic) (cpp_reader *, + enum cpp_diagnostic_level, + enum cpp_warning_reason, + rich_location *, + const char *, va_list *) + ATTRIBUTE_FPTR_PRINTF(5,0); + + /* Callbacks for when a macro is expanded, or tested (whether + defined or not at the time) in #ifdef, #ifndef or "defined". */ + void (*used_define) (cpp_reader *, location_t, cpp_hashnode *); + void (*used_undef) (cpp_reader *, location_t, cpp_hashnode *); + /* Called before #define and #undef or other macro definition + changes are processed. */ + void (*before_define) (cpp_reader *); + /* Called whenever a macro is expanded or tested. + Second argument is the location of the start of the current expansion. */ + void (*used) (cpp_reader *, location_t, cpp_hashnode *); + + /* Callback to identify whether an attribute exists. */ + int (*has_attribute) (cpp_reader *, bool); + + /* Callback to determine whether a built-in function is recognized. */ + int (*has_builtin) (cpp_reader *); + + /* Callback that can change a user lazy into normal macro. 
*/ + void (*user_lazy_macro) (cpp_reader *, cpp_macro *, unsigned); + + /* Callback to handle deferred cpp_macros. */ + cpp_macro *(*user_deferred_macro) (cpp_reader *, location_t, cpp_hashnode *); + + /* Callback to parse SOURCE_DATE_EPOCH from environment. */ + time_t (*get_source_date_epoch) (cpp_reader *); + + /* Callback for providing suggestions for misspelled directives. */ + const char *(*get_suggestion) (cpp_reader *, const char *, const char *const *); + + /* Callback for when a comment is encountered, giving the location + of the opening slash, a pointer to the content (which is not + necessarily 0-terminated), and the length of the content. + The content contains the opening slash-star (or slash-slash), + and for C-style comments contains the closing star-slash. For + C++-style comments it does not include the terminating newline. */ + void (*comment) (cpp_reader *, location_t, const unsigned char *, + size_t); + + /* Callback for filename remapping in __FILE__ and __BASE_FILE__ macro + expansions. */ + const char *(*remap_filename) (const char*); + + /* Maybe translate a #include into something else. Return a + cpp_buffer containing the translation if translating. */ + char *(*translate_include) (cpp_reader *, line_maps *, location_t, + const char *path); +}; + +#ifdef VMS +#define INO_T_CPP ino_t ino[3] +#elif defined (_AIX) && SIZEOF_INO_T == 4 +#define INO_T_CPP ino64_t ino +#else +#define INO_T_CPP ino_t ino +#endif + +#if defined (_AIX) && SIZEOF_DEV_T == 4 +#define DEV_T_CPP dev64_t dev +#else +#define DEV_T_CPP dev_t dev +#endif + +/* Chain of directories to look for include files in. */ +struct cpp_dir +{ + /* NULL-terminated singly-linked list. */ + struct cpp_dir *next; + + /* NAME of the directory, NUL-terminated. */ + char *name; + unsigned int len; + + /* One if a system header, two if a system header that has extern + "C" guards for C++. */ + unsigned char sysp; + + /* Is this a user-supplied directory? 
*/ + bool user_supplied_p; + + /* The canonicalized NAME as determined by lrealpath. This field + is only used by hosts that lack reliable inode numbers. */ + char *canonical_name; + + /* Mapping of file names for this directory for MS-DOS and related + platforms. A NULL-terminated array of (from, to) pairs. */ + const char **name_map; + + /* Routine to construct pathname, given the search path name and the + HEADER we are trying to find, return a constructed pathname to + try and open. If this is NULL, the constructed pathname is as + constructed by append_file_to_dir. */ + char *(*construct) (const char *header, cpp_dir *dir); + + /* The C front end uses these to recognize duplicated + directories in the search path. */ + INO_T_CPP; + DEV_T_CPP; +}; + +/* The kind of the cpp_macro. */ +enum cpp_macro_kind { + cmk_macro, /* An ISO macro (token expansion). */ + cmk_assert, /* An assertion. */ + cmk_traditional /* A traditional macro (text expansion). */ +}; + +/* Each macro definition is recorded in a cpp_macro structure. + Variadic macros cannot occur with traditional cpp. */ +struct GTY(()) cpp_macro { + union cpp_parm_u + { + /* Parameters, if any. If parameter names use extended identifiers, + the original spelling of those identifiers, not the canonical + UTF-8 spelling, goes here. */ + cpp_hashnode ** GTY ((tag ("false"), + nested_ptr (union tree_node, + "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL", + "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"), + length ("%1.paramc"))) params; + + /* If this is an assertion, the next one in the chain. */ + cpp_macro *GTY ((tag ("true"))) next; + } GTY ((desc ("%1.kind == cmk_assert"))) parm; + + /* Definition line number. */ + location_t line; + + /* Number of tokens in body, or bytes for traditional macros. */ + /* Do we really need 2^32-1 range here? */ + unsigned int count; + + /* Number of parameters. */ + unsigned short paramc; + + /* Non-zero if this is a user-lazy macro, value provided by user. 
*/ + unsigned char lazy; + + /* The kind of this macro (ISO, trad or assert) */ + unsigned kind : 2; + + /* If a function-like macro. */ + unsigned int fun_like : 1; + + /* If a variadic macro. */ + unsigned int variadic : 1; + + /* If macro defined in system header. */ + unsigned int syshdr : 1; + + /* Nonzero if it has been expanded or had its existence tested. */ + unsigned int used : 1; + + /* Indicate whether the tokens include extra CPP_PASTE tokens at the + end to track invalid redefinitions with consecutive CPP_PASTE + tokens. */ + unsigned int extra_tokens : 1; + + /* Imported C++20 macro (from a header unit). */ + unsigned int imported_p : 1; + + /* 0 bits spare (32-bit). 32 on 64-bit target. */ + + union cpp_exp_u + { + /* Trailing array of replacement tokens (ISO), or assertion body value. */ + cpp_token GTY ((tag ("false"), length ("%1.count"))) tokens[1]; + + /* Pointer to replacement text (traditional). See comment at top + of cpptrad.c for how traditional function-like macros are + encoded. */ + const unsigned char *GTY ((tag ("true"))) text; + } GTY ((desc ("%1.kind == cmk_traditional"))) exp; +}; + +/* Poisoned identifiers are flagged NODE_POISONED. NODE_OPERATOR (C++ + only) indicates an identifier that behaves like an operator such as + "xor". NODE_DIAGNOSTIC is for speed in lex_token: it indicates a + diagnostic may be required for this node. Currently this only + applies to __VA_ARGS__, poisoned identifiers, and -Wc++-compat + warnings about NODE_OPERATOR. */ + +/* Hash node flags. */ +#define NODE_OPERATOR (1 << 0) /* C++ named operator. */ +#define NODE_POISONED (1 << 1) /* Poisoned identifier. */ +#define NODE_DIAGNOSTIC (1 << 2) /* Possible diagnostic when lexed. */ +#define NODE_WARN (1 << 3) /* Warn if redefined or undefined. */ +#define NODE_DISABLED (1 << 4) /* A disabled macro. */ +#define NODE_USED (1 << 5) /* Dumped with -dU. 
*/ +#define NODE_CONDITIONAL (1 << 6) /* Conditional macro */ +#define NODE_WARN_OPERATOR (1 << 7) /* Warn about C++ named operator. */ +#define NODE_MODULE (1 << 8) /* C++-20 module-related name. */ + +/* Different flavors of hash node. */ +enum node_type +{ + NT_VOID = 0, /* Maybe an assert? */ + NT_MACRO_ARG, /* A macro arg. */ + NT_USER_MACRO, /* A user macro. */ + NT_BUILTIN_MACRO, /* A builtin macro. */ + NT_MACRO_MASK = NT_USER_MACRO /* Mask for either macro kind. */ +}; + +/* Different flavors of builtin macro. _Pragma is an operator, but we + handle it with the builtin code for efficiency reasons. */ +enum cpp_builtin_type +{ + BT_SPECLINE = 0, /* `__LINE__' */ + BT_DATE, /* `__DATE__' */ + BT_FILE, /* `__FILE__' */ + BT_FILE_NAME, /* `__FILE_NAME__' */ + BT_BASE_FILE, /* `__BASE_FILE__' */ + BT_INCLUDE_LEVEL, /* `__INCLUDE_LEVEL__' */ + BT_TIME, /* `__TIME__' */ + BT_STDC, /* `__STDC__' */ + BT_PRAGMA, /* `_Pragma' operator */ + BT_TIMESTAMP, /* `__TIMESTAMP__' */ + BT_COUNTER, /* `__COUNTER__' */ + BT_HAS_ATTRIBUTE, /* `__has_attribute(x)' */ + BT_HAS_STD_ATTRIBUTE, /* `__has_c_attribute(x)' */ + BT_HAS_BUILTIN, /* `__has_builtin(x)' */ + BT_HAS_INCLUDE, /* `__has_include(x)' */ + BT_HAS_INCLUDE_NEXT /* `__has_include_next(x)' */ +}; + +#define CPP_HASHNODE(HNODE) ((cpp_hashnode *) (HNODE)) +#define HT_NODE(NODE) (&(NODE)->ident) +#define NODE_LEN(NODE) HT_LEN (HT_NODE (NODE)) +#define NODE_NAME(NODE) HT_STR (HT_NODE (NODE)) + +/* The common part of an identifier node shared amongst all 3 C front + ends. Also used to store CPP identifiers, which are a superset of + identifiers in the grammatical sense. */ + +union GTY(()) _cpp_hashnode_value { + /* Assert (maybe NULL) */ + cpp_macro * GTY((tag ("NT_VOID"))) answers; + /* Macro (maybe NULL) */ + cpp_macro * GTY((tag ("NT_USER_MACRO"))) macro; + /* Code for a builtin macro. */ + enum cpp_builtin_type GTY ((tag ("NT_BUILTIN_MACRO"))) builtin; + /* Macro argument index. 
*/ + unsigned short GTY ((tag ("NT_MACRO_ARG"))) arg_index; +}; + +struct GTY(()) cpp_hashnode { + struct ht_identifier ident; + unsigned int is_directive : 1; + unsigned int directive_index : 7; /* If is_directive, + then index into directive table. + Otherwise, a NODE_OPERATOR. */ + unsigned int rid_code : 8; /* Rid code - for front ends. */ + unsigned int flags : 9; /* CPP flags. */ + ENUM_BITFIELD(node_type) type : 2; /* CPP node type. */ + + /* 5 bits spare. */ + + /* The deferred cookie is applicable to NT_USER_MACRO or NT_VOID. + The latter for when a macro had a prevailing undef. + On a 64-bit system there would be 32-bits of padding to the value + field. So placing the deferred index here is not costly. */ + unsigned deferred; /* Deferred cookie */ + + union _cpp_hashnode_value GTY ((desc ("%1.type"))) value; +}; + +/* A class for iterating through the source locations within a + string token (before escapes are interpreted, and before + concatenation). */ + +class cpp_string_location_reader { + public: + cpp_string_location_reader (location_t src_loc, + line_maps *line_table); + + source_range get_next (); + + private: + location_t m_loc; + int m_offset_per_column; +}; + +/* A class for storing the source ranges of all of the characters within + a string literal, after escapes are interpreted, and after + concatenation. + + This is not GTY-marked, as instances are intended to be temporary. */ + +class cpp_substring_ranges +{ + public: + cpp_substring_ranges (); + ~cpp_substring_ranges (); + + int get_num_ranges () const { return m_num_ranges; } + source_range get_range (int idx) const + { + linemap_assert (idx < m_num_ranges); + return m_ranges[idx]; + } + + void add_range (source_range range); + void add_n_ranges (int num, cpp_string_location_reader &loc_reader); + + private: + source_range *m_ranges; + int m_num_ranges; + int m_alloc_ranges; +}; + +/* Call this first to get a handle to pass to other functions. 
+ + If you want cpplib to manage its own hashtable, pass in a NULL + pointer. Otherwise you should pass in an initialized hash table + that cpplib will share; this technique is used by the C front + ends. */ +extern cpp_reader *cpp_create_reader (enum c_lang, struct ht *, + class line_maps *); + +/* Reset the cpp_reader's line_map. This is only used after reading a + PCH file. */ +extern void cpp_set_line_map (cpp_reader *, class line_maps *); + +/* Call this to change the selected language standard (e.g. because of + command line options). */ +extern void cpp_set_lang (cpp_reader *, enum c_lang); + +/* Set the include paths. */ +extern void cpp_set_include_chains (cpp_reader *, cpp_dir *, cpp_dir *, int); + +/* Call these to get pointers to the options, callback, and deps + structures for a given reader. These pointers are good until you + call cpp_finish on that reader. You can either edit the callbacks + through the pointer returned from cpp_get_callbacks, or set them + with cpp_set_callbacks. */ +extern cpp_options *cpp_get_options (cpp_reader *) ATTRIBUTE_PURE; +extern cpp_callbacks *cpp_get_callbacks (cpp_reader *) ATTRIBUTE_PURE; +extern void cpp_set_callbacks (cpp_reader *, cpp_callbacks *); +extern class mkdeps *cpp_get_deps (cpp_reader *) ATTRIBUTE_PURE; + +extern const char *cpp_probe_header_unit (cpp_reader *, const char *file, + bool angle_p, location_t); + +/* Call these to get name data about the various compile-time + charsets. */ +extern const char *cpp_get_narrow_charset_name (cpp_reader *) ATTRIBUTE_PURE; +extern const char *cpp_get_wide_charset_name (cpp_reader *) ATTRIBUTE_PURE; + +/* This function reads the file, but does not start preprocessing. It + returns the name of the original file; this is the same as the + input file, except for preprocessed input. This will generate at + least one file change callback, and possibly a line change callback + too. If there was an error opening the file, it returns NULL. 
*/ +extern const char *cpp_read_main_file (cpp_reader *, const char *, + bool injecting = false); +extern location_t cpp_main_loc (const cpp_reader *); + +/* Adjust for the main file to be an include. */ +extern void cpp_retrofit_as_include (cpp_reader *); + +/* Set up built-ins with special behavior. Use cpp_init_builtins() + instead unless your know what you are doing. */ +extern void cpp_init_special_builtins (cpp_reader *); + +/* Set up built-ins like __FILE__. */ +extern void cpp_init_builtins (cpp_reader *, int); + +/* This is called after options have been parsed, and partially + processed. */ +extern void cpp_post_options (cpp_reader *); + +/* Set up translation to the target character set. */ +extern void cpp_init_iconv (cpp_reader *); + +/* Call this to finish preprocessing. If you requested dependency + generation, pass an open stream to write the information to, + otherwise NULL. It is your responsibility to close the stream. */ +extern void cpp_finish (cpp_reader *, FILE *deps_stream); + +/* Call this to release the handle at the end of preprocessing. Any + use of the handle after this function returns is invalid. 
*/ +extern void cpp_destroy (cpp_reader *); + +extern unsigned int cpp_token_len (const cpp_token *); +extern unsigned char *cpp_token_as_text (cpp_reader *, const cpp_token *); +extern unsigned char *cpp_spell_token (cpp_reader *, const cpp_token *, + unsigned char *, bool); +extern void cpp_register_pragma (cpp_reader *, const char *, const char *, + void (*) (cpp_reader *), bool); +extern void cpp_register_deferred_pragma (cpp_reader *, const char *, + const char *, unsigned, bool, bool); +extern int cpp_avoid_paste (cpp_reader *, const cpp_token *, + const cpp_token *); +extern const cpp_token *cpp_get_token (cpp_reader *); +extern const cpp_token *cpp_get_token_with_location (cpp_reader *, + location_t *); +inline bool cpp_user_macro_p (const cpp_hashnode *node) +{ + return node->type == NT_USER_MACRO; +} +inline bool cpp_builtin_macro_p (const cpp_hashnode *node) +{ + return node->type == NT_BUILTIN_MACRO; +} +inline bool cpp_macro_p (const cpp_hashnode *node) +{ + return node->type & NT_MACRO_MASK; +} +inline cpp_macro *cpp_set_deferred_macro (cpp_hashnode *node, + cpp_macro *forced = NULL) +{ + cpp_macro *old = node->value.macro; + + node->value.macro = forced; + node->type = NT_USER_MACRO; + node->flags &= ~NODE_USED; + + return old; +} +cpp_macro *cpp_get_deferred_macro (cpp_reader *, cpp_hashnode *, location_t); + +/* Returns true if NODE is a function-like user macro. */ +inline bool cpp_fun_like_macro_p (cpp_hashnode *node) +{ + return cpp_user_macro_p (node) && node->value.macro->fun_like; +} + +extern const unsigned char *cpp_macro_definition (cpp_reader *, cpp_hashnode *); +extern const unsigned char *cpp_macro_definition (cpp_reader *, cpp_hashnode *, + const cpp_macro *); +inline location_t cpp_macro_definition_location (cpp_hashnode *node) +{ + const cpp_macro *macro = node->value.macro; + return macro ? macro->line : 0; +} +/* Return an idempotent time stamp (possibly from SOURCE_DATE_EPOCH). 
*/ +enum class CPP_time_kind +{ + FIXED = -1, /* Fixed time via source epoch. */ + DYNAMIC = -2, /* Dynamic via time(2). */ + UNKNOWN = -3 /* Wibbly wobbly, timey wimey. */ +}; +extern CPP_time_kind cpp_get_date (cpp_reader *, time_t *); + +extern void _cpp_backup_tokens (cpp_reader *, unsigned int); +extern const cpp_token *cpp_peek_token (cpp_reader *, int); + +/* Evaluate a CPP_*CHAR* token. */ +extern cppchar_t cpp_interpret_charconst (cpp_reader *, const cpp_token *, + unsigned int *, int *); +/* Evaluate a vector of CPP_*STRING* tokens. */ +extern bool cpp_interpret_string (cpp_reader *, + const cpp_string *, size_t, + cpp_string *, enum cpp_ttype); +extern const char *cpp_interpret_string_ranges (cpp_reader *pfile, + const cpp_string *from, + cpp_string_location_reader *, + size_t count, + cpp_substring_ranges *out, + enum cpp_ttype type); +extern bool cpp_interpret_string_notranslate (cpp_reader *, + const cpp_string *, size_t, + cpp_string *, enum cpp_ttype); + +/* Convert a host character constant to the execution character set. */ +extern cppchar_t cpp_host_to_exec_charset (cpp_reader *, cppchar_t); + +/* Used to register macros and assertions, perhaps from the command line. + The text is the same as the command line argument. */ +extern void cpp_define (cpp_reader *, const char *); +extern void cpp_define_unused (cpp_reader *, const char *); +extern void cpp_define_formatted (cpp_reader *pfile, + const char *fmt, ...) ATTRIBUTE_PRINTF_2; +extern void cpp_define_formatted_unused (cpp_reader *pfile, + const char *fmt, + ...) ATTRIBUTE_PRINTF_2; +extern void cpp_assert (cpp_reader *, const char *); +extern void cpp_undef (cpp_reader *, const char *); +extern void cpp_unassert (cpp_reader *, const char *); + +/* Mark a node as a lazily defined macro. */ +extern void cpp_define_lazily (cpp_reader *, cpp_hashnode *node, unsigned N); + +/* Undefine all macros and assertions. 
*/ +extern void cpp_undef_all (cpp_reader *); + +extern cpp_buffer *cpp_push_buffer (cpp_reader *, const unsigned char *, + size_t, int); +extern int cpp_defined (cpp_reader *, const unsigned char *, int); + +/* A preprocessing number. Code assumes that any unused high bits of + the double integer are set to zero. */ + +/* This type has to be equal to unsigned HOST_WIDE_INT, see + gcc/c-family/c-lex.cc. */ +typedef uint64_t cpp_num_part; +typedef struct cpp_num cpp_num; +struct cpp_num +{ + cpp_num_part high; + cpp_num_part low; + bool unsignedp; /* True if value should be treated as unsigned. */ + bool overflow; /* True if the most recent calculation overflowed. */ +}; + +/* cpplib provides two interfaces for interpretation of preprocessing + numbers. + + cpp_classify_number categorizes numeric constants according to + their field (integer, floating point, or invalid), radix (decimal, + octal, hexadecimal), and type suffixes. */ + +#define CPP_N_CATEGORY 0x000F +#define CPP_N_INVALID 0x0000 +#define CPP_N_INTEGER 0x0001 +#define CPP_N_FLOATING 0x0002 + +#define CPP_N_WIDTH 0x00F0 +#define CPP_N_SMALL 0x0010 /* int, float, short _Fract/Accum */ +#define CPP_N_MEDIUM 0x0020 /* long, double, long _Fract/_Accum. */ +#define CPP_N_LARGE 0x0040 /* long long, long double, + long long _Fract/Accum. */ + +#define CPP_N_WIDTH_MD 0xF0000 /* machine defined. */ +#define CPP_N_MD_W 0x10000 +#define CPP_N_MD_Q 0x20000 + +#define CPP_N_RADIX 0x0F00 +#define CPP_N_DECIMAL 0x0100 +#define CPP_N_HEX 0x0200 +#define CPP_N_OCTAL 0x0400 +#define CPP_N_BINARY 0x0800 + +#define CPP_N_UNSIGNED 0x1000 /* Properties. */ +#define CPP_N_IMAGINARY 0x2000 +#define CPP_N_DFLOAT 0x4000 +#define CPP_N_DEFAULT 0x8000 + +#define CPP_N_FRACT 0x100000 /* Fract types. */ +#define CPP_N_ACCUM 0x200000 /* Accum types. */ +#define CPP_N_FLOATN 0x400000 /* _FloatN types. */ +#define CPP_N_FLOATNX 0x800000 /* _FloatNx types. */ + +#define CPP_N_USERDEF 0x1000000 /* C++11 user-defined literal. 
*/ + +#define CPP_N_SIZE_T 0x2000000 /* C++23 size_t literal. */ + +#define CPP_N_WIDTH_FLOATN_NX 0xF0000000 /* _FloatN / _FloatNx value + of N, divided by 16. */ +#define CPP_FLOATN_SHIFT 24 +#define CPP_FLOATN_MAX 0xF0 + +/* Classify a CPP_NUMBER token. The return value is a combination of + the flags from the above sets. */ +extern unsigned cpp_classify_number (cpp_reader *, const cpp_token *, + const char **, location_t); + +/* Return the classification flags for a float suffix. */ +extern unsigned int cpp_interpret_float_suffix (cpp_reader *, const char *, + size_t); + +/* Return the classification flags for an int suffix. */ +extern unsigned int cpp_interpret_int_suffix (cpp_reader *, const char *, + size_t); + +/* Evaluate a token classified as category CPP_N_INTEGER. */ +extern cpp_num cpp_interpret_integer (cpp_reader *, const cpp_token *, + unsigned int); + +/* Sign extend a number, with PRECISION significant bits and all + others assumed clear, to fill out a cpp_num structure. */ +cpp_num cpp_num_sign_extend (cpp_num, size_t); + +/* Output a diagnostic of some kind. */ +extern bool cpp_error (cpp_reader *, enum cpp_diagnostic_level, + const char *msgid, ...) + ATTRIBUTE_PRINTF_3; +extern bool cpp_warning (cpp_reader *, enum cpp_warning_reason, + const char *msgid, ...) + ATTRIBUTE_PRINTF_3; +extern bool cpp_pedwarning (cpp_reader *, enum cpp_warning_reason, + const char *msgid, ...) + ATTRIBUTE_PRINTF_3; +extern bool cpp_warning_syshdr (cpp_reader *, enum cpp_warning_reason reason, + const char *msgid, ...) + ATTRIBUTE_PRINTF_3; + +/* As their counterparts above, but use RICHLOC. */ +extern bool cpp_warning_at (cpp_reader *, enum cpp_warning_reason, + rich_location *richloc, const char *msgid, ...) + ATTRIBUTE_PRINTF_4; +extern bool cpp_pedwarning_at (cpp_reader *, enum cpp_warning_reason, + rich_location *richloc, const char *msgid, ...) + ATTRIBUTE_PRINTF_4; + +/* Output a diagnostic with "MSGID: " preceding the + error string of errno. 
No location is printed. */ +extern bool cpp_errno (cpp_reader *, enum cpp_diagnostic_level, + const char *msgid); +/* Similarly, but with "FILENAME: " instead of "MSGID: ", where + the filename is not localized. */ +extern bool cpp_errno_filename (cpp_reader *, enum cpp_diagnostic_level, + const char *filename, location_t loc); + +/* Same as cpp_error, except additionally specifies a position as a + (translation unit) physical line and physical column. If the line is + zero, then no location is printed. */ +extern bool cpp_error_with_line (cpp_reader *, enum cpp_diagnostic_level, + location_t, unsigned, + const char *msgid, ...) + ATTRIBUTE_PRINTF_5; +extern bool cpp_warning_with_line (cpp_reader *, enum cpp_warning_reason, + location_t, unsigned, + const char *msgid, ...) + ATTRIBUTE_PRINTF_5; +extern bool cpp_pedwarning_with_line (cpp_reader *, enum cpp_warning_reason, + location_t, unsigned, + const char *msgid, ...) + ATTRIBUTE_PRINTF_5; +extern bool cpp_warning_with_line_syshdr (cpp_reader *, enum cpp_warning_reason, + location_t, unsigned, + const char *msgid, ...) + ATTRIBUTE_PRINTF_5; + +extern bool cpp_error_at (cpp_reader * pfile, enum cpp_diagnostic_level, + location_t src_loc, const char *msgid, ...) + ATTRIBUTE_PRINTF_4; + +extern bool cpp_error_at (cpp_reader * pfile, enum cpp_diagnostic_level, + rich_location *richloc, const char *msgid, ...) + ATTRIBUTE_PRINTF_4; + +/* In lex.cc */ +extern int cpp_ideq (const cpp_token *, const char *); +extern void cpp_output_line (cpp_reader *, FILE *); +extern unsigned char *cpp_output_line_to_string (cpp_reader *, + const unsigned char *); +extern const unsigned char *cpp_alloc_token_string + (cpp_reader *, const unsigned char *, unsigned); +extern void cpp_output_token (const cpp_token *, FILE *); +extern const char *cpp_type2name (enum cpp_ttype, unsigned char flags); +/* Returns the value of an escape sequence, truncated to the correct + target precision. 
PSTR points to the input pointer, which is just + after the backslash. LIMIT is how much text we have. WIDE is true + if the escape sequence is part of a wide character constant or + string literal. Handles all relevant diagnostics. */ +extern cppchar_t cpp_parse_escape (cpp_reader *, const unsigned char ** pstr, + const unsigned char *limit, int wide); + +/* Structure used to hold a comment block at a given location in the + source code. */ + +typedef struct +{ + /* Text of the comment including the terminators. */ + char *comment; + + /* source location for the given comment. */ + location_t sloc; +} cpp_comment; + +/* Structure holding all comments for a given cpp_reader. */ + +typedef struct +{ + /* table of comment entries. */ + cpp_comment *entries; + + /* number of actual entries entered in the table. */ + int count; + + /* number of entries allocated currently. */ + int allocated; +} cpp_comment_table; + +/* Returns the table of comments encountered by the preprocessor. This + table is only populated when pfile->state.save_comments is true. */ +extern cpp_comment_table *cpp_get_comments (cpp_reader *); + +/* In hash.c */ + +/* Lookup an identifier in the hashtable. Puts the identifier in the + table if it is not already there. 
*/ +extern cpp_hashnode *cpp_lookup (cpp_reader *, const unsigned char *, + unsigned int); + +typedef int (*cpp_cb) (cpp_reader *, cpp_hashnode *, void *); +extern void cpp_forall_identifiers (cpp_reader *, cpp_cb, void *); + +/* In macro.cc */ +extern void cpp_scan_nooutput (cpp_reader *); +extern int cpp_sys_macro_p (cpp_reader *); +extern unsigned char *cpp_quote_string (unsigned char *, const unsigned char *, + unsigned int); +extern bool cpp_compare_macros (const cpp_macro *macro1, + const cpp_macro *macro2); + +/* In files.cc */ +extern bool cpp_included (cpp_reader *, const char *); +extern bool cpp_included_before (cpp_reader *, const char *, location_t); +extern void cpp_make_system_header (cpp_reader *, int, int); +extern bool cpp_push_include (cpp_reader *, const char *); +extern bool cpp_push_default_include (cpp_reader *, const char *); +extern void cpp_change_file (cpp_reader *, enum lc_reason, const char *); +extern const char *cpp_get_path (struct _cpp_file *); +extern cpp_dir *cpp_get_dir (struct _cpp_file *); +extern cpp_buffer *cpp_get_buffer (cpp_reader *); +extern struct _cpp_file *cpp_get_file (cpp_buffer *); +extern cpp_buffer *cpp_get_prev (cpp_buffer *); +extern void cpp_clear_file_cache (cpp_reader *); + +/* cpp_get_converted_source returns the contents of the given file, as it exists + after cpplib has read it and converted it from the input charset to the + source charset. Return struct will be zero-filled if the data could not be + read for any reason. The data starts at the DATA pointer, but the TO_FREE + pointer is what should be passed to free(), as there may be an offset. 
*/ +struct cpp_converted_source +{ + char *to_free; + char *data; + size_t len; +}; +cpp_converted_source cpp_get_converted_source (const char *fname, + const char *input_charset); + +/* In pch.cc */ +struct save_macro_data; +extern int cpp_save_state (cpp_reader *, FILE *); +extern int cpp_write_pch_deps (cpp_reader *, FILE *); +extern int cpp_write_pch_state (cpp_reader *, FILE *); +extern int cpp_valid_state (cpp_reader *, const char *, int); +extern void cpp_prepare_state (cpp_reader *, struct save_macro_data **); +extern int cpp_read_state (cpp_reader *, const char *, FILE *, + struct save_macro_data *); + +/* In lex.cc */ +extern void cpp_force_token_locations (cpp_reader *, location_t); +extern void cpp_stop_forcing_token_locations (cpp_reader *); +enum CPP_DO_task +{ + CPP_DO_print, + CPP_DO_location, + CPP_DO_token +}; + +extern void cpp_directive_only_process (cpp_reader *pfile, + void *data, + void (*cb) (cpp_reader *, + CPP_DO_task, + void *data, ...)); + +/* In expr.cc */ +extern enum cpp_ttype cpp_userdef_string_remove_type + (enum cpp_ttype type); +extern enum cpp_ttype cpp_userdef_string_add_type + (enum cpp_ttype type); +extern enum cpp_ttype cpp_userdef_char_remove_type + (enum cpp_ttype type); +extern enum cpp_ttype cpp_userdef_char_add_type + (enum cpp_ttype type); +extern bool cpp_userdef_string_p + (enum cpp_ttype type); +extern bool cpp_userdef_char_p + (enum cpp_ttype type); +extern const char * cpp_get_userdef_suffix + (const cpp_token *); + +/* In charset.cc */ + +/* The result of attempting to decode a run of UTF-8 bytes. */ + +struct cpp_decoded_char +{ + const char *m_start_byte; + const char *m_next_byte; + + bool m_valid_ch; + cppchar_t m_ch; +}; + +/* Information for mapping between code points and display columns. + + This is a tabstop value, along with a callback for getting the + widths of characters. 
Normally this callback is cpp_wcwidth, but we + support other schemes for escaping non-ASCII unicode as a series of + ASCII chars when printing the user's source code in diagnostic-show-locus.cc + + For example, consider: + - the Unicode character U+03C0 "GREEK SMALL LETTER PI" (UTF-8: 0xCF 0x80) + - the Unicode character U+1F642 "SLIGHTLY SMILING FACE" + (UTF-8: 0xF0 0x9F 0x99 0x82) + - the byte 0xBF (a stray trailing byte of a UTF-8 character) + Normally U+03C0 would occupy one display column, U+1F642 + would occupy two display columns, and the stray byte would be + printed verbatim as one display column. + + However when escaping them as unicode code points as "" + and "" they occupy 8 and 9 display columns respectively, + and when escaping them as bytes as "<80>" and "<9F><99><82>" + they occupy 8 and 16 display columns respectively. In both cases + the stray byte is escaped to as 4 display columns. */ + +struct cpp_char_column_policy +{ + cpp_char_column_policy (int tabstop, + int (*width_cb) (cppchar_t c)) + : m_tabstop (tabstop), + m_undecoded_byte_width (1), + m_width_cb (width_cb) + {} + + int m_tabstop; + /* Width in display columns of a stray byte that isn't decodable + as UTF-8. */ + int m_undecoded_byte_width; + int (*m_width_cb) (cppchar_t c); +}; + +/* A class to manage the state while converting a UTF-8 sequence to cppchar_t + and computing the display width one character at a time. 
*/ +class cpp_display_width_computation { + public: + cpp_display_width_computation (const char *data, int data_length, + const cpp_char_column_policy &policy); + const char *next_byte () const { return m_next; } + int bytes_processed () const { return m_next - m_begin; } + int bytes_left () const { return m_bytes_left; } + bool done () const { return !bytes_left (); } + int display_cols_processed () const { return m_display_cols; } + + int process_next_codepoint (cpp_decoded_char *out); + int advance_display_cols (int n); + + private: + const char *const m_begin; + const char *m_next; + size_t m_bytes_left; + const cpp_char_column_policy &m_policy; + int m_display_cols; +}; + +/* Convenience functions that are simple use cases for class + cpp_display_width_computation. Tab characters will be expanded to spaces + as determined by POLICY.m_tabstop, and non-printable-ASCII characters + will be escaped as per POLICY. */ + +int cpp_byte_column_to_display_column (const char *data, int data_length, + int column, + const cpp_char_column_policy &policy); +inline int cpp_display_width (const char *data, int data_length, + const cpp_char_column_policy &policy) +{ + return cpp_byte_column_to_display_column (data, data_length, data_length, + policy); +} +int cpp_display_column_to_byte_column (const char *data, int data_length, + int display_col, + const cpp_char_column_policy &policy); +int cpp_wcwidth (cppchar_t c); + +bool cpp_input_conversion_is_trivial (const char *input_charset); +int cpp_check_utf8_bom (const char *data, size_t data_length); + +#endif /* ! LIBCPP_CPPLIB_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cselib.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cselib.h new file mode 100644 index 0000000..9ae65e6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/cselib.h @@ -0,0 +1,142 @@ +/* Common subexpression elimination for GNU compiler. + Copyright (C) 1987-2022 Free Software Foundation, Inc. 
+ +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_CSELIB_H +#define GCC_CSELIB_H + +/* Describe a value. */ +struct cselib_val +{ + /* The hash value. */ + unsigned int hash; + + /* A unique id assigned to values. */ + int uid; + + /* A VALUE rtx that points back to this structure. */ + rtx val_rtx; + + /* All rtl expressions that hold this value at the current time during a + scan. */ + struct elt_loc_list *locs; + + /* If this value is used as an address, points to a list of values that + use it as an address in a MEM. */ + struct elt_list *addr_list; + + struct cselib_val *next_containing_mem; +}; + +/* A list of rtl expressions that hold the same value. */ +struct elt_loc_list { + /* Next element in the list. */ + struct elt_loc_list *next; + /* An rtl expression that holds the value. */ + rtx loc; + /* The insn that made the equivalence. */ + rtx_insn *setting_insn; +}; + +/* Describe a single set that is part of an insn. 
*/ +struct cselib_set +{ + rtx src; + rtx dest; + cselib_val *src_elt; + cselib_val *dest_addr_elt; +}; + +enum cselib_record_what +{ + CSELIB_RECORD_MEMORY = 1, + CSELIB_PRESERVE_CONSTANTS = 2 +}; + +extern void (*cselib_discard_hook) (cselib_val *); +extern void (*cselib_record_sets_hook) (rtx_insn *insn, struct cselib_set *sets, + int n_sets); + +extern cselib_val *cselib_lookup (rtx, machine_mode, + int, machine_mode); +extern cselib_val *cselib_lookup_from_insn (rtx, machine_mode, + int, machine_mode, rtx_insn *); +extern void cselib_init (int); +extern void cselib_clear_table (void); +extern void cselib_finish (void); +extern void cselib_process_insn (rtx_insn *); +extern bool fp_setter_insn (rtx_insn *); +extern machine_mode cselib_reg_set_mode (const_rtx); +extern int rtx_equal_for_cselib_1 (rtx, rtx, machine_mode, int); +extern int references_value_p (const_rtx, int); +extern rtx cselib_expand_value_rtx (rtx, bitmap, int); +typedef rtx (*cselib_expand_callback)(rtx, bitmap, int, void *); +extern rtx cselib_expand_value_rtx_cb (rtx, bitmap, int, + cselib_expand_callback, void *); +extern bool cselib_dummy_expand_value_rtx_cb (rtx, bitmap, int, + cselib_expand_callback, void *); +extern rtx cselib_subst_to_values (rtx, machine_mode); +extern rtx cselib_subst_to_values_from_insn (rtx, machine_mode, rtx_insn *); +extern void cselib_invalidate_rtx (rtx); + +extern void cselib_reset_table (unsigned int); +extern unsigned int cselib_get_next_uid (void); +extern void cselib_preserve_value (cselib_val *); +extern bool cselib_preserved_value_p (cselib_val *); +extern void cselib_preserve_only_values (void); +extern void cselib_preserve_cfa_base_value (cselib_val *, unsigned int); +extern void cselib_add_permanent_equiv (cselib_val *, rtx, rtx_insn *); +extern bool cselib_have_permanent_equivalences (void); +extern void cselib_set_value_sp_based (cselib_val *); +extern bool cselib_sp_based_value_p (cselib_val *); +extern void cselib_record_sp_cfa_base_equiv 
(HOST_WIDE_INT, rtx_insn *); +extern bool cselib_sp_derived_value_p (cselib_val *); + +extern void dump_cselib_table (FILE *); + +/* Return the canonical value for VAL, following the equivalence chain + towards the earliest (== lowest uid) equivalent value. */ + +static inline cselib_val * +canonical_cselib_val (cselib_val *val) +{ + cselib_val *canon; + + if (!val->locs || val->locs->next + || !val->locs->loc || GET_CODE (val->locs->loc) != VALUE + || val->uid < CSELIB_VAL_PTR (val->locs->loc)->uid) + return val; + + canon = CSELIB_VAL_PTR (val->locs->loc); + gcc_checking_assert (canonical_cselib_val (canon) == canon); + return canon; +} + +/* Return nonzero if we can prove that X and Y contain the same value, taking + our gathered information into account. */ + +static inline int +rtx_equal_for_cselib_p (rtx x, rtx y) +{ + if (x == y) + return 1; + + return rtx_equal_for_cselib_1 (x, y, VOIDmode, 0); +} + +#endif /* GCC_CSELIB_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ctfc.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ctfc.h new file mode 100644 index 0000000..001e544 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ctfc.h @@ -0,0 +1,448 @@ +/* ctfc.h - Declarations and definitions related to the CTF container. + Copyright (C) 2019-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +/* This file defines the data structures and functions used by the compiler + to generate the CTF debug info. The definitions below are compiler internal + representations and closely reflect the CTF format requirements in . + + The contents of the CTF container are used eventually for emission of both + CTF (ctfout.cc) and BTF debug info (btfout.cc), as the two type debug formats + are close cousins. */ + +#ifndef GCC_CTFC_H +#define GCC_CTFC_H 1 + +#include "config.h" +#include "system.h" +#include "tree.h" +#include "fold-const.h" +#include "dwarf2ctf.h" +#include "ctf.h" +#include "btf.h" + +/* Invalid CTF type ID definition. */ + +#define CTF_NULL_TYPEID 0 + +/* Value to start generating the CTF type ID from. */ + +#define CTF_INIT_TYPEID 1 + +/* CTF type ID. */ + +typedef uint64_t ctf_id_t; + +/* CTF string table element (list node). */ + +typedef struct GTY ((chain_next ("%h.cts_next"))) ctf_string +{ + const char * cts_str; /* CTF string. */ + struct ctf_string * cts_next; /* A list node. */ +} ctf_string_t; + +/* Internal representation of CTF string table. */ + +typedef struct GTY (()) ctf_strtable +{ + ctf_string_t * ctstab_head; /* Head str ptr. */ + ctf_string_t * ctstab_tail; /* Tail. new str appended to tail. */ + int ctstab_num; /* Number of strings in the table. */ + size_t ctstab_len; /* Size of string table in bytes. */ + const char * ctstab_estr; /* Empty string "". */ +} ctf_strtable_t; + +/* Encoding information for integers, floating-point values etc. The flags + field will contain values appropriate for the type defined in . */ + +typedef struct GTY (()) ctf_encoding +{ + unsigned int cte_format; /* Data format (CTF_INT_* or CTF_FP_* flags). */ + unsigned int cte_offset; /* Offset of value in bits. */ + unsigned int cte_bits; /* Size of storage in bits. */ +} ctf_encoding_t; + +/* Array information for CTF generation. */ + +typedef struct GTY (()) ctf_arinfo +{ + ctf_id_t ctr_contents; /* Type of array contents. 
*/ + ctf_id_t ctr_index; /* Type of array index. */ + unsigned int ctr_nelems; /* Number of elements. */ +} ctf_arinfo_t; + +/* Function information for CTF generation. */ + +typedef struct GTY (()) ctf_funcinfo +{ + ctf_id_t ctc_return; /* Function return type. */ + unsigned int ctc_argc; /* Number of typed arguments to function. */ + unsigned int ctc_flags; /* Function attributes (see below). */ +} ctf_funcinfo_t; + +typedef struct GTY (()) ctf_sliceinfo +{ + unsigned int cts_type; /* Reference CTF type. */ + unsigned short cts_offset; /* Offset in bits of the first bit. */ + unsigned short cts_bits; /* Size in bits. */ +} ctf_sliceinfo_t; + +/* CTF type representation internal to the compiler. It closely reflects the + ctf_type_t type node in except the GTY (()) tags. */ + +typedef struct GTY (()) ctf_itype +{ + uint32_t ctti_name; /* Reference to name in string table. */ + uint32_t ctti_info; /* Encoded kind, variant length (see below). */ + union GTY ((desc ("0"))) + { + uint32_t GTY ((tag ("0"))) _size;/* Size of entire type in bytes. */ + uint32_t GTY ((tag ("1"))) _type;/* Reference to another type. */ + } _u; + uint32_t ctti_lsizehi; /* High 32 bits of type size in bytes. */ + uint32_t ctti_lsizelo; /* Low 32 bits of type size in bytes. */ +} ctf_itype_t; + +#define ctti_size _u._size +#define ctti_type _u._type + +/* Function arguments end with varargs. */ + +#define CTF_FUNC_VARARG 0x1 + +/* Struct/union/enum member definition for CTF generation. */ + +typedef struct GTY ((chain_next ("%h.dmd_next"))) ctf_dmdef +{ + const char * dmd_name; /* Name of this member. */ + ctf_id_t dmd_type; /* Type of this member (for sou). */ + uint32_t dmd_name_offset; /* Offset of the name in str table. */ + uint64_t dmd_offset; /* Offset of this member in bits (for sou). */ + int dmd_value; /* Value of this member (for enum). */ + struct ctf_dmdef * dmd_next; /* A list node. 
*/ +} ctf_dmdef_t; + +#define ctf_dmd_list_next(elem) ((ctf_dmdef_t *)((elem)->dmd_next)) + +/* Function Argument. */ + +typedef struct GTY (()) ctf_func_arg +{ + ctf_id_t farg_type; /* Type identifier of the argument. */ + const char * farg_name; /* Name of the argument. */ + uint32_t farg_name_offset; /* Offset of the name in str table. */ + struct ctf_func_arg * farg_next;/* A list node. */ +} ctf_func_arg_t; + +#define ctf_farg_list_next(elem) ((ctf_func_arg_t *)((elem)->farg_next)) + +/* Type definition for CTF generation. */ + +struct GTY ((for_user)) ctf_dtdef +{ + dw_die_ref dtd_key; /* Type key for hashing. */ + const char * dtd_name; /* Name associated with definition (if any). */ + ctf_id_t dtd_type; /* Type identifier for this definition. */ + ctf_itype_t dtd_data; /* Type node. */ + bool from_global_func; /* Whether this type was added from a global + function. */ + union GTY ((desc ("ctf_dtu_d_union_selector (&%1)"))) + { + /* struct, union, or enum. */ + ctf_dmdef_t * GTY ((tag ("CTF_DTU_D_MEMBERS"))) dtu_members; + /* array. */ + ctf_arinfo_t GTY ((tag ("CTF_DTU_D_ARRAY"))) dtu_arr; + /* integer or float. */ + ctf_encoding_t GTY ((tag ("CTF_DTU_D_ENCODING"))) dtu_enc; + /* function. */ + ctf_func_arg_t * GTY ((tag ("CTF_DTU_D_ARGUMENTS"))) dtu_argv; + /* slice. */ + ctf_sliceinfo_t GTY ((tag ("CTF_DTU_D_SLICE"))) dtu_slice; + } dtd_u; +}; + +typedef struct ctf_dtdef ctf_dtdef_t; + +/* Variable definition for CTF generation. */ + +struct GTY ((for_user)) ctf_dvdef +{ + dw_die_ref dvd_key; /* DWARF DIE corresponding to the variable. */ + const char * dvd_name; /* Name associated with variable. */ + uint32_t dvd_name_offset; /* Offset of the name in str table. */ + unsigned int dvd_visibility; /* External visibility. 0=static,1=global. */ + ctf_id_t dvd_type; /* Type of variable. 
*/ +}; + +typedef struct ctf_dvdef ctf_dvdef_t; + +typedef ctf_dvdef_t * ctf_dvdef_ref; +typedef ctf_dtdef_t * ctf_dtdef_ref; + +/* Location information for CTF Types and CTF Variables. */ + +typedef struct GTY (()) ctf_srcloc +{ + const char * ctsloc_file; + unsigned int ctsloc_line; + unsigned int ctsloc_col; +} ctf_srcloc_t; + +typedef ctf_srcloc_t * ctf_srcloc_ref; + +/* Helper enum and api for the GTY machinery to work on union dtu_d. */ + +enum ctf_dtu_d_union_enum { + CTF_DTU_D_MEMBERS, + CTF_DTU_D_ARRAY, + CTF_DTU_D_ENCODING, + CTF_DTU_D_ARGUMENTS, + CTF_DTU_D_SLICE +}; + +enum ctf_dtu_d_union_enum +ctf_dtu_d_union_selector (ctf_dtdef_ref); + +struct ctfc_dtd_hasher : ggc_ptr_hash +{ + typedef ctf_dtdef_ref compare_type; + + static hashval_t hash (ctf_dtdef_ref); + static bool equal (ctf_dtdef_ref, ctf_dtdef_ref); +}; + +inline hashval_t +ctfc_dtd_hasher::hash (ctf_dtdef_ref dtd) +{ + return htab_hash_pointer (dtd->dtd_key); +} + +inline bool +ctfc_dtd_hasher::equal (ctf_dtdef_ref dtd, ctf_dtdef_ref dtd2) +{ + return (dtd->dtd_key == dtd2->dtd_key); +} + +struct ctfc_dvd_hasher : ggc_ptr_hash +{ + typedef ctf_dvdef_ref compare_type; + + static hashval_t hash (ctf_dvdef_ref); + static bool equal (ctf_dvdef_ref, ctf_dvdef_ref); +}; + +inline hashval_t +ctfc_dvd_hasher::hash (ctf_dvdef_ref dvd) +{ + return htab_hash_pointer (dvd->dvd_key); +} + +inline bool +ctfc_dvd_hasher::equal (ctf_dvdef_ref dvd, ctf_dvdef_ref dvd2) +{ + return (dvd->dvd_key == dvd2->dvd_key); +} + +/* CTF container structure. + It is the context passed around when generating ctf debug info. There is + one container per translation unit. */ + +typedef struct GTY (()) ctf_container +{ + /* CTF Preamble. */ + unsigned short ctfc_magic; + unsigned char ctfc_version; + unsigned char ctfc_flags; + uint32_t ctfc_cuname_offset; + + /* CTF types. */ + hash_table * GTY (()) ctfc_types; + /* CTF variables. */ + hash_table * GTY (()) ctfc_vars; + /* CTF variables to be ignored. 
*/ + hash_table * GTY (()) ctfc_ignore_vars; + + /* CTF string table. */ + ctf_strtable_t ctfc_strtable; + /* Auxilliary string table. At this time, used for keeping func arg names + for BTF. */ + ctf_strtable_t ctfc_aux_strtable; + + uint64_t ctfc_num_types; + uint64_t ctfc_num_stypes; + uint64_t ctfc_num_global_funcs; + uint64_t ctfc_num_global_objts; + + /* Number of vlen bytes - the variable length portion after ctf_type_t and + ctf_stype_t in the CTF section. This is used to calculate the offsets in + the CTF header. */ + uint64_t ctfc_num_vlen_bytes; + + /* Next CTF type id to assign. */ + ctf_id_t ctfc_nextid; + + /* Specify an explicit length of 0 so that the GC marking routines steer + clear of marking the CTF vars and CTF types twice. These lists below do + not own the pointed to objects, they simply hold references to them. */ + + /* List of pre-processed CTF Variables. CTF requires that the variables + appear in the sorted order of their names. */ + ctf_dvdef_t ** GTY ((length ("0"))) ctfc_vars_list; + /* Count of pre-processed CTF Variables in the list. */ + uint64_t ctfc_vars_list_count; + /* List of pre-processed CTF types. CTF requires that a shared type must + appear before the type that uses it. For the compiler, this means types + are emitted in sorted order of their type IDs. */ + ctf_dtdef_t ** GTY ((length ("0"))) ctfc_types_list; + /* List of CTF function types for global functions. The order of global + function entries in the CTF funcinfo section is undefined by the + compiler. */ + ctf_dtdef_t ** GTY ((length ("0"))) ctfc_gfuncs_list; + /* List of CTF variables at global scope. The order of global object entries + in the CTF objinfo section is undefined by the compiler. */ + ctf_dvdef_t ** GTY ((length ("0"))) ctfc_gobjts_list; + + /* Following members are for debugging only. They do not add functional + value to the task of CTF creation. These can be cleaned up once CTF + generation stabilizes. 
*/ + + /* Keep a count of the number of bytes dumped in asm for debugging + purposes. */ + uint64_t ctfc_numbytes_asm; + /* Total length of all strings in CTF. */ + size_t ctfc_strlen; + /* Total length of all strings in aux string table. */ + size_t ctfc_aux_strlen; + +} ctf_container_t; + +/* Markers for which string table from the CTF container to use. */ + +#define CTF_STRTAB 0 /* CTF string table. */ +#define CTF_AUX_STRTAB 1 /* CTF auxilliary string table. */ + +typedef ctf_container_t * ctf_container_ref; + +extern GTY (()) ctf_container_ref tu_ctfc; + +extern void ctfc_delete_container (ctf_container_ref); + +/* If the next ctf type id is still set to the init value, no ctf records to + report. */ +extern bool ctfc_is_empty_container (ctf_container_ref); + +/* Get the total number of CTF types in the container. */ + +extern unsigned int ctfc_get_num_ctf_types (ctf_container_ref); + +/* Get the total number of CTF variables in the container. */ + +extern unsigned int ctfc_get_num_ctf_vars (ctf_container_ref); + +/* Get reference to the CTF string table or the CTF auxilliary + string table. */ + +extern ctf_strtable_t * ctfc_get_strtab (ctf_container_ref, int); + +/* Get the length of the specified string table in the CTF container. */ + +extern size_t ctfc_get_strtab_len (ctf_container_ref, int); + +/* Get the number of bytes to represent the variable length portion of all CTF + types in the CTF container. */ + +extern size_t ctfc_get_num_vlen_bytes (ctf_container_ref); + +/* The compiler demarcates whether types are visible at top-level scope or not. + The only example so far of a type not visible at top-level scope is slices. + CTF_ADD_NONROOT is used to indicate the latter. */ +#define CTF_ADD_NONROOT 0 /* CTF type only visible in nested scope. */ +#define CTF_ADD_ROOT 1 /* CTF type visible at top-level scope. 
*/ + +/* These APIs allow to initialize and finalize the CTF machinery and + to add types to the CTF container associated to the current + translation unit. Used in dwarf2ctf.cc. */ + +extern void ctf_init (void); +extern void ctf_output (const char * filename); +extern void ctf_finalize (void); + +extern void btf_output (const char * filename); +extern void btf_init_postprocess (void); +extern void btf_finalize (void); + +extern ctf_container_ref ctf_get_tu_ctfc (void); + +extern bool ctf_type_exists (ctf_container_ref, dw_die_ref, ctf_id_t *); + +extern void ctf_add_cuname (ctf_container_ref, const char *); + +extern ctf_dtdef_ref ctf_dtd_lookup (const ctf_container_ref ctfc, + dw_die_ref die); +extern ctf_dvdef_ref ctf_dvd_lookup (const ctf_container_ref ctfc, + dw_die_ref die); +extern bool ctf_dvd_ignore_lookup (const ctf_container_ref ctfc, + dw_die_ref die); + +extern const char * ctf_add_string (ctf_container_ref, const char *, + uint32_t *, int); + +extern ctf_id_t ctf_add_reftype (ctf_container_ref, uint32_t, ctf_id_t, + uint32_t, dw_die_ref); +extern ctf_id_t ctf_add_enum (ctf_container_ref, uint32_t, const char *, + HOST_WIDE_INT, dw_die_ref); +extern ctf_id_t ctf_add_slice (ctf_container_ref, uint32_t, ctf_id_t, + uint32_t, uint32_t, dw_die_ref); +extern ctf_id_t ctf_add_float (ctf_container_ref, uint32_t, const char *, + const ctf_encoding_t *, dw_die_ref); +extern ctf_id_t ctf_add_integer (ctf_container_ref, uint32_t, const char *, + const ctf_encoding_t *, dw_die_ref); +extern ctf_id_t ctf_add_unknown (ctf_container_ref, uint32_t, const char *, + const ctf_encoding_t *, dw_die_ref); +extern ctf_id_t ctf_add_pointer (ctf_container_ref, uint32_t, ctf_id_t, + dw_die_ref); +extern ctf_id_t ctf_add_array (ctf_container_ref, uint32_t, + const ctf_arinfo_t *, dw_die_ref); +extern ctf_id_t ctf_add_forward (ctf_container_ref, uint32_t, const char *, + uint32_t, dw_die_ref); +extern ctf_id_t ctf_add_typedef (ctf_container_ref, uint32_t, const char *, + 
ctf_id_t, dw_die_ref); +extern ctf_id_t ctf_add_function (ctf_container_ref, uint32_t, const char *, + const ctf_funcinfo_t *, dw_die_ref, bool); +extern ctf_id_t ctf_add_sou (ctf_container_ref, uint32_t, const char *, + uint32_t, size_t, dw_die_ref); + +extern int ctf_add_enumerator (ctf_container_ref, ctf_id_t, const char *, + HOST_WIDE_INT, dw_die_ref); +extern int ctf_add_member_offset (ctf_container_ref, dw_die_ref, const char *, + ctf_id_t, uint64_t); +extern int ctf_add_function_arg (ctf_container_ref, dw_die_ref, + const char *, ctf_id_t); +extern int ctf_add_variable (ctf_container_ref, const char *, ctf_id_t, + dw_die_ref, unsigned int, dw_die_ref); + +extern ctf_id_t ctf_lookup_tree_type (ctf_container_ref, const tree); +extern ctf_id_t get_btf_id (ctf_id_t); + +/* CTF section does not emit location information; at this time, location + information is needed for BTF CO-RE use-cases. */ + +extern int ctfc_get_dtd_srcloc (ctf_dtdef_ref, ctf_srcloc_ref); +extern int ctfc_get_dvd_srcloc (ctf_dvdef_ref, ctf_srcloc_ref); + +#endif /* GCC_CTFC_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/d/d-tree.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/d/d-tree.def new file mode 100644 index 0000000..9cef280 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/d/d-tree.def @@ -0,0 +1,29 @@ +/* d-tree.def -- Definitions and documentation for additional tree codes used + in the D compiler (see tree.def for standard codes). + Copyright (C) 2006-2022 Free Software Foundation, Inc. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Logical shift done on an unsigned type. If the first operand is + signed, it will be converted to the unsigned equivalent. The second + operand is the number of bits to shift by; it need not be the same + type as the first operand and result. */ +DEFTREECODE (UNSIGNED_RSHIFT_EXPR, "unsigned_rshift_expr", tcc_binary, 2) + +/* Floating point modulus that expands to a call to fmod. */ +DEFTREECODE (FLOAT_MOD_EXPR, "float_mod_expr", tcc_binary, 2) + +/* Used to represent information associated with a function closure. */ +DEFTREECODE (FUNCFRAME_INFO, "funcframe_info", tcc_exceptional, 0) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/data-streamer.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/data-streamer.h new file mode 100644 index 0000000..df01d00 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/data-streamer.h @@ -0,0 +1,349 @@ +/* Generic streaming support for various data types. + + Copyright (C) 2011-2022 Free Software Foundation, Inc. + Contributed by Diego Novillo + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DATA_STREAMER_H +#define GCC_DATA_STREAMER_H + +#include "lto-streamer.h" + +/* Data structures used to pack values and bitflags into a vector of + words. 
Used to stream values of a fixed number of bits in a space + efficient way. */ +static unsigned const BITS_PER_BITPACK_WORD = HOST_BITS_PER_WIDE_INT; + +typedef unsigned HOST_WIDE_INT bitpack_word_t; + +struct bitpack_d +{ + /* The position of the first unused or unconsumed bit in the word. */ + unsigned pos; + + /* The current word we are (un)packing. */ + bitpack_word_t word; + + /* The lto_output_stream or the lto_input_block we are streaming to/from. */ + void *stream; +}; + +/* In data-streamer.cc */ +void bp_pack_var_len_unsigned (struct bitpack_d *, unsigned HOST_WIDE_INT); +void bp_pack_var_len_int (struct bitpack_d *, HOST_WIDE_INT); +unsigned HOST_WIDE_INT bp_unpack_var_len_unsigned (struct bitpack_d *); +HOST_WIDE_INT bp_unpack_var_len_int (struct bitpack_d *); + +/* In data-streamer-out.cc */ +void streamer_write_zero (struct output_block *); +void streamer_write_uhwi (struct output_block *, unsigned HOST_WIDE_INT); +void streamer_write_hwi (struct output_block *, HOST_WIDE_INT); +void streamer_write_poly_uint64 (struct output_block *, poly_uint64); +void streamer_write_poly_int64 (struct output_block *, poly_int64); +void streamer_write_gcov_count (struct output_block *, gcov_type); +void streamer_write_string (struct output_block *, struct lto_output_stream *, + const char *, bool); +void streamer_write_string_with_length (struct output_block *, + struct lto_output_stream *, + const char *, unsigned int, bool); +void bp_pack_string_with_length (struct output_block *, struct bitpack_d *, + const char *, unsigned int, bool); +void bp_pack_string (struct output_block *, struct bitpack_d *, + const char *, bool); +void streamer_write_uhwi_stream (struct lto_output_stream *, + unsigned HOST_WIDE_INT); +void streamer_write_hwi_stream (struct lto_output_stream *, HOST_WIDE_INT); +void streamer_write_gcov_count_stream (struct lto_output_stream *, gcov_type); +void streamer_write_data_stream (struct lto_output_stream *, const void *, + size_t); +void 
streamer_write_wide_int (struct output_block *, const wide_int &); +void streamer_write_widest_int (struct output_block *, const widest_int &); + +/* In data-streamer-in.cc */ +const char *streamer_read_string (class data_in *, class lto_input_block *); +const char *streamer_read_indexed_string (class data_in *, + class lto_input_block *, + unsigned int *); +const char *bp_unpack_indexed_string (class data_in *, struct bitpack_d *, + unsigned int *); +const char *bp_unpack_string (class data_in *, struct bitpack_d *); +unsigned HOST_WIDE_INT streamer_read_uhwi (class lto_input_block *); +HOST_WIDE_INT streamer_read_hwi (class lto_input_block *); +poly_uint64 streamer_read_poly_uint64 (class lto_input_block *); +poly_int64 streamer_read_poly_int64 (class lto_input_block *); +gcov_type streamer_read_gcov_count (class lto_input_block *); +wide_int streamer_read_wide_int (class lto_input_block *); +widest_int streamer_read_widest_int (class lto_input_block *); + +/* Returns a new bit-packing context for bit-packing into S. */ +static inline struct bitpack_d +bitpack_create (struct lto_output_stream *s) +{ + struct bitpack_d bp; + bp.pos = 0; + bp.word = 0; + bp.stream = (void *)s; + return bp; +} + +/* Pack the NBITS bit sized value VAL into the bit-packing context BP. */ +static inline void +bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits) +{ + bitpack_word_t word = bp->word; + int pos = bp->pos; + + /* Verify that VAL fits in the NBITS. */ + gcc_checking_assert (nbits == BITS_PER_BITPACK_WORD + || !(val & ~(((bitpack_word_t)1< BITS_PER_BITPACK_WORD) + { + streamer_write_uhwi_stream ((struct lto_output_stream *) bp->stream, + word); + word = val; + pos = nbits; + } + else + { + word |= val << pos; + pos += nbits; + } + bp->word = word; + bp->pos = pos; +} + +/* Pack VAL into the bit-packing context BP, using NBITS for each + coefficient. 
*/ +static inline void +bp_pack_poly_value (struct bitpack_d *bp, + const poly_int &val, + unsigned nbits) +{ + for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i) + bp_pack_value (bp, val.coeffs[i], nbits); +} + +/* Finishes bit-packing of BP. */ +static inline void +streamer_write_bitpack (struct bitpack_d *bp) +{ + streamer_write_uhwi_stream ((struct lto_output_stream *) bp->stream, + bp->word); + bp->word = 0; + bp->pos = 0; +} + +/* Returns a new bit-packing context for bit-unpacking from IB. */ +static inline struct bitpack_d +streamer_read_bitpack (class lto_input_block *ib) +{ + struct bitpack_d bp; + bp.word = streamer_read_uhwi (ib); + bp.pos = 0; + bp.stream = (void *)ib; + return bp; +} + +/* Unpacks NBITS bits from the bit-packing context BP and returns them. */ +static inline bitpack_word_t +bp_unpack_value (struct bitpack_d *bp, unsigned nbits) +{ + bitpack_word_t mask, val; + int pos = bp->pos; + + mask = (nbits == BITS_PER_BITPACK_WORD + ? (bitpack_word_t) -1 + : ((bitpack_word_t) 1 << nbits) - 1); + + /* If there are not continuous nbits in the current bitpack word + switch to the next one. */ + if (pos + nbits > BITS_PER_BITPACK_WORD) + { + bp->word = val + = streamer_read_uhwi ((class lto_input_block *)bp->stream); + bp->pos = nbits; + return val & mask; + } + val = bp->word; + val >>= pos; + bp->pos = pos + nbits; + + return val & mask; +} + +/* Unpacks a polynomial value from the bit-packing context BP in which each + coefficient has NBITS bits. */ +static inline poly_int +bp_unpack_poly_value (struct bitpack_d *bp, unsigned nbits) +{ + poly_int_pod x; + for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i) + x.coeffs[i] = bp_unpack_value (bp, nbits); + return x; +} + + +/* Write a character to the output block. */ + +static inline void +streamer_write_char_stream (struct lto_output_stream *obs, char c) +{ + /* No space left. */ + if (obs->left_in_block == 0) + lto_append_block (obs); + + /* Write the actual character. 
*/ + char *current_pointer = obs->current_pointer; + *(current_pointer++) = c; + obs->current_pointer = current_pointer; + obs->total_size++; + obs->left_in_block--; +} + + +/* Read byte from the input block. */ + +static inline unsigned char +streamer_read_uchar (class lto_input_block *ib) +{ + if (ib->p >= ib->len) + lto_section_overrun (ib); + return (ib->data[ib->p++]); +} + +/* Output VAL into OBS and verify it is in range MIN...MAX that is supposed + to be compile time constant. + Be host independent, limit range to 31bits. */ + +static inline void +streamer_write_hwi_in_range (struct lto_output_stream *obs, + HOST_WIDE_INT min, + HOST_WIDE_INT max, + HOST_WIDE_INT val) +{ + HOST_WIDE_INT range = max - min; + + gcc_checking_assert (val >= min && val <= max && range > 0 + && range < 0x7fffffff); + + val -= min; + streamer_write_uhwi_stream (obs, (unsigned HOST_WIDE_INT) val); +} + +/* Input VAL into OBS and verify it is in range MIN...MAX that is supposed + to be compile time constant. PURPOSE is used for error reporting. */ + +static inline HOST_WIDE_INT +streamer_read_hwi_in_range (class lto_input_block *ib, + const char *purpose, + HOST_WIDE_INT min, + HOST_WIDE_INT max) +{ + HOST_WIDE_INT range = max - min; + unsigned HOST_WIDE_INT uval = streamer_read_uhwi (ib); + + gcc_checking_assert (range > 0 && range < 0x7fffffff); + + HOST_WIDE_INT val = (HOST_WIDE_INT) (uval + (unsigned HOST_WIDE_INT) min); + if (val < min || val > max) + lto_value_range_error (purpose, val, min, max); + return val; +} + +/* Output VAL into BP and verify it is in range MIN...MAX that is supposed + to be compile time constant. + Be host independent, limit range to 31bits. 
*/ + +static inline void +bp_pack_int_in_range (struct bitpack_d *bp, + HOST_WIDE_INT min, + HOST_WIDE_INT max, + HOST_WIDE_INT val) +{ + HOST_WIDE_INT range = max - min; + int nbits = floor_log2 (range) + 1; + + gcc_checking_assert (val >= min && val <= max && range > 0 + && range < 0x7fffffff); + + val -= min; + bp_pack_value (bp, val, nbits); +} + +/* Input VAL into BP and verify it is in range MIN...MAX that is supposed + to be compile time constant. PURPOSE is used for error reporting. */ + +static inline HOST_WIDE_INT +bp_unpack_int_in_range (struct bitpack_d *bp, + const char *purpose, + HOST_WIDE_INT min, + HOST_WIDE_INT max) +{ + HOST_WIDE_INT range = max - min; + int nbits = floor_log2 (range) + 1; + HOST_WIDE_INT val = bp_unpack_value (bp, nbits); + + gcc_checking_assert (range > 0 && range < 0x7fffffff); + + if (val < min || val > max) + lto_value_range_error (purpose, val, min, max); + return val; +} + +/* Output VAL of type "enum enum_name" into OBS. + Assume range 0...ENUM_LAST - 1. */ +#define streamer_write_enum(obs,enum_name,enum_last,val) \ + streamer_write_hwi_in_range ((obs), 0, (int)(enum_last) - 1, (int)(val)) + +/* Input enum of type "enum enum_name" from IB. + Assume range 0...ENUM_LAST - 1. */ +#define streamer_read_enum(ib,enum_name,enum_last) \ + (enum enum_name)streamer_read_hwi_in_range ((ib), #enum_name, 0, \ + (int)(enum_last) - 1) + +/* Output VAL of type "enum enum_name" into BP. + Assume range 0...ENUM_LAST - 1. */ +#define bp_pack_enum(bp,enum_name,enum_last,val) \ + bp_pack_int_in_range ((bp), 0, (int)(enum_last) - 1, (int)(val)) + +/* Input enum of type "enum enum_name" from BP. + Assume range 0...ENUM_LAST - 1. */ +#define bp_unpack_enum(bp,enum_name,enum_last) \ + (enum enum_name)bp_unpack_int_in_range ((bp), #enum_name, 0, \ + (int)(enum_last) - 1) + +/* Output the start of a record with TAG to output block OB. 
*/ + +static inline void +streamer_write_record_start (struct output_block *ob, enum LTO_tags tag) +{ + streamer_write_enum (ob->main_stream, LTO_tags, LTO_NUM_TAGS, tag); +} + +/* Return the next tag in the input block IB. */ + +static inline enum LTO_tags +streamer_read_record_start (class lto_input_block *ib) +{ + return streamer_read_enum (ib, LTO_tags, LTO_NUM_TAGS); +} + +#endif /* GCC_DATA_STREAMER_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbgcnt.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbgcnt.def new file mode 100644 index 0000000..3aa18cd --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbgcnt.def @@ -0,0 +1,216 @@ +/* This file contains the list of the debug counter for GCC. + Copyright (C) 2006-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +/* A debug counter provides you a way to count an event + and return false after the counter has exceeded the threshold + specified by the option. + + What is it used for ? + + This is primarily used to speed up the search for the bad transformation + an optimization pass does. By doing a binary search on N, + you can quickly narrow down to one transformation + which is bad, or which triggers the bad behavior downstream + (usually in the form of the badly generated code). + + How does it work ? 
+ + Every time dbg_cnt(named-counter) is called, + the counter is incremented for the named-counter. + And the incremented value is compared against the threshold (limit) + specified by the option. + dbg_cnt () returns true if it is at or below threshold, and false if above. + + How to add a new one ? + + To add a new counter, simply add an entry below with some descriptive name, + and add call(s) to dbg_cnt(your-counter-name) in appropriate places. + Usually, you want to control at the finest granularity + any particular transformation can happen. + e.g. for each instruction in a dead code elimination, + or for each copy instruction in register coalescing, + or constant-propagation for each insn, + or a block straightening, etc. + See dce.cc for an example. With the dbg_cnt () call in dce.cc, + now a developer can use -fdbg-cnt=dce:N + to stop doing the dead code elimination after N times. + + How to use it ? + + By default, all limits are UINT_MAX. + Since debug count is unsigned int, <= UINT_MAX returns true always. + i.e. dbg_cnt() returns true always regardless of the counter value + (although it still counts the event). + Use -fdbg-cnt=counter1:N,counter2:M,... + which sets the limit for counter1 to N, and the limit for counter2 to M, etc. + e.g. setting a limit to zero will make dbg_cnt () return false *always*. + + The following shell file can then be used to binary search for + exact transformation that causes the bug. A second shell script + should be written, say "tryTest", which exits with 1 if the + compiled program fails and exits with 0 if the program succeeds. + This shell script should take 1 parameter, the value to be passed + to set the counter of the compilation command in tryTest. Then, + assuming that the following script is called binarySearch, + the command: + + binarySearch tryTest + + will automatically find the highest value of the counter for which + the program fails. 
If tryTest never fails, binarySearch will + produce unpredictable results as it will try to find an upper bound + that does not exist. + + When dbgcnt does hits the limit, it writes a comment in the current + dump_file of the form: + + ***dbgcnt: limit reached for %s.*** + + Assuming that the dump file is logging the analysis/transformations + it is making, this pinpoints the exact position in the log file + where the problem transformation is being logged. + +===================================== +#!/bin/bash + +while getopts "l:u:i:" opt +do + case $opt in + l) lb="$OPTARG";; + u) ub="$OPTARG";; + i) init="$OPTARG";; + ?) usage; exit 3;; + esac +done + +shift $(($OPTIND - 1)) +echo $@ +cmd=${1+"${@}"} + +lb=${lb:=0} +init=${init:=100} + +$cmd $lb +lb_val=$? +if [ -z "$ub" ]; then + # find the upper bound + ub=$(($init + $lb)) + true + while [ $? -eq $lb_val ]; do + ub=$(($ub * 10)) + #ub=`expr $ub \* 10` + $cmd $ub + done +fi + +echo command: $cmd + +true +while [ `expr $ub - $lb` -gt 1 ]; do + try=$(($lb + ( $ub - $lb ) / 2)) + $cmd $try + if [ $? -eq $lb_val ]; then + lb=$try + else + ub=$try + fi +done + +echo lbound: $lb +echo ubound: $ub + +===================================== + +*/ + +/* Debug counter definitions. + Please keep the list sorted in alphabetic order. 
*/ +DEBUG_COUNTER (asan_use_after_scope) +DEBUG_COUNTER (auto_inc_dec) +DEBUG_COUNTER (back_thread1) +DEBUG_COUNTER (back_thread2) +DEBUG_COUNTER (back_threadfull1) +DEBUG_COUNTER (back_threadfull2) +DEBUG_COUNTER (ccp) +DEBUG_COUNTER (cfg_cleanup) +DEBUG_COUNTER (cprop) +DEBUG_COUNTER (cse2_move2add) +DEBUG_COUNTER (dce) +DEBUG_COUNTER (dce_fast) +DEBUG_COUNTER (dce_ud) +DEBUG_COUNTER (delete_trivial_dead) +DEBUG_COUNTER (devirt) +DEBUG_COUNTER (df_byte_scan) +DEBUG_COUNTER (dom_unreachable_edges) +DEBUG_COUNTER (dse) +DEBUG_COUNTER (dse1) +DEBUG_COUNTER (dse2) +DEBUG_COUNTER (gcse2_delete) +DEBUG_COUNTER (gimple_unroll) +DEBUG_COUNTER (global_alloc_at_func) +DEBUG_COUNTER (global_alloc_at_reg) +DEBUG_COUNTER (graphite_scop) +DEBUG_COUNTER (hoist) +DEBUG_COUNTER (hoist_insn) +DEBUG_COUNTER (ia64_sched2) +DEBUG_COUNTER (if_after_combine) +DEBUG_COUNTER (if_after_reload) +DEBUG_COUNTER (if_conversion) +DEBUG_COUNTER (if_conversion_tree) +DEBUG_COUNTER (if_to_switch) +DEBUG_COUNTER (ipa_attr) +DEBUG_COUNTER (ipa_cp_bits) +DEBUG_COUNTER (ipa_cp_values) +DEBUG_COUNTER (ipa_cp_vr) +DEBUG_COUNTER (ipa_mod_ref) +DEBUG_COUNTER (ipa_mod_ref_pta) +DEBUG_COUNTER (ipa_sra_params) +DEBUG_COUNTER (ipa_sra_retvalues) +DEBUG_COUNTER (ira_move) +DEBUG_COUNTER (ivopts_loop) +DEBUG_COUNTER (lim) +DEBUG_COUNTER (local_alloc_for_sched) +DEBUG_COUNTER (match) +DEBUG_COUNTER (merged_ipa_icf) +DEBUG_COUNTER (phiopt_edge_range) +DEBUG_COUNTER (postreload_cse) +DEBUG_COUNTER (pre) +DEBUG_COUNTER (pre_insn) +DEBUG_COUNTER (prefetch) +DEBUG_COUNTER (registered_jump_thread) +DEBUG_COUNTER (sched2_func) +DEBUG_COUNTER (sched_block) +DEBUG_COUNTER (sched_breakdep) +DEBUG_COUNTER (sched_func) +DEBUG_COUNTER (sched_insn) +DEBUG_COUNTER (sched_region) +DEBUG_COUNTER (sel_sched_cnt) +DEBUG_COUNTER (sel_sched_insn_cnt) +DEBUG_COUNTER (sel_sched_region_cnt) +DEBUG_COUNTER (sms_sched_loop) +DEBUG_COUNTER (split_for_sched2) +DEBUG_COUNTER (store_merging) +DEBUG_COUNTER (store_motion) +DEBUG_COUNTER 
(stv_conversion) +DEBUG_COUNTER (tail_call) +DEBUG_COUNTER (tree_sra) +DEBUG_COUNTER (treepre_insert) +DEBUG_COUNTER (vect_loop) +DEBUG_COUNTER (vect_slp) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbgcnt.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbgcnt.h new file mode 100644 index 0000000..39873ff --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbgcnt.h @@ -0,0 +1,40 @@ +/* Debug counter for debugging support + Copyright (C) 2006-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. + +See dbgcnt.def for usage information. 
*/ + +#ifndef GCC_DBGCNT_H +#define GCC_DBGCNT_H + +#define DEBUG_COUNTER(a) a, + +enum debug_counter { +#include "dbgcnt.def" + debug_counter_number_of_counters +}; + +#undef DEBUG_COUNTER + +extern bool dbg_cnt_is_enabled (enum debug_counter index); +extern bool dbg_cnt (enum debug_counter index); +extern unsigned dbg_cnt_counter (enum debug_counter index); +extern void dbg_cnt_process_opt (const char *arg); +extern void dbg_cnt_list_all_counters (void); + +#endif /* GCC_DBGCNT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbxout.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbxout.h new file mode 100644 index 0000000..2c38e76 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dbxout.h @@ -0,0 +1,60 @@ +/* dbxout.h - Various declarations for functions found in dbxout.cc + Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_DBXOUT_H +#define GCC_DBXOUT_H + +extern int dbxout_symbol (tree, int); +extern void dbxout_parms (tree); +extern void dbxout_reg_parms (tree); +extern int dbxout_syms (tree); + +extern void default_stabs_asm_out_destructor (rtx, int); +extern void default_stabs_asm_out_constructor (rtx, int); + +/* dbxout helper functions */ +#if defined DBX_DEBUGGING_INFO || defined XCOFF_DEBUGGING_INFO + +extern void dbxout_int (int); +extern void dbxout_stabd (int, int); +extern void dbxout_begin_stabn (int); +extern void dbxout_begin_stabn_sline (int); +extern void dbxout_begin_empty_stabs (int); +extern void dbxout_begin_simple_stabs (const char *, int); +extern void dbxout_begin_simple_stabs_desc (const char *, int, int); + +extern void dbxout_stab_value_zero (void); +extern void dbxout_stab_value_label (const char *); +extern void dbxout_stab_value_label_diff (const char *, const char *); +extern void dbxout_stab_value_internal_label (const char *, int *); +extern void dbxout_stab_value_internal_label_diff (const char *, int *, + const char *); +#endif + +/* Language description for N_SO stabs. */ +#define N_SO_AS 1 +#define N_SO_C 2 +#define N_SO_ANSI_C 3 +#define N_SO_CC 4 /* c++*/ +#define N_SO_FORTRAN 5 +#define N_SO_FORTRAN90 7 +#define N_SO_OBJC 50 +#define N_SO_OBJCPLUS 51 + +#endif /* GCC_DBXOUT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dce.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dce.h new file mode 100644 index 0000000..90af301 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dce.h @@ -0,0 +1,27 @@ +/* RTL dead code elimination. + Copyright (C) 2005-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DCE_H +#define GCC_DCE_H + +extern void run_word_dce (void); +extern void run_fast_dce (void); +extern void run_fast_df_dce (void); + +#endif /* GCC_DCE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ddg.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ddg.h new file mode 100644 index 0000000..1048ea2 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ddg.h @@ -0,0 +1,182 @@ +/* DDG - Data Dependence Graph - interface. + Copyright (C) 2004-2022 Free Software Foundation, Inc. + Contributed by Ayal Zaks and Mustafa Hagog + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DDG_H +#define GCC_DDG_H + +/* For sbitmap. 
*/ + +typedef struct ddg_node *ddg_node_ptr; +typedef struct ddg_edge *ddg_edge_ptr; +typedef struct ddg *ddg_ptr; +typedef struct ddg_scc *ddg_scc_ptr; +typedef struct ddg_all_sccs *ddg_all_sccs_ptr; + +enum dep_type {TRUE_DEP, OUTPUT_DEP, ANTI_DEP}; +enum dep_data_type {REG_OR_MEM_DEP, REG_DEP, MEM_DEP, REG_AND_MEM_DEP}; + +/* The following two macros enables direct access to the successors and + predecessors bitmaps held in each ddg_node. Do not make changes to + these bitmaps, unless you want to change the DDG. */ +#define NODE_SUCCESSORS(x) ((x)->successors) +#define NODE_PREDECESSORS(x) ((x)->predecessors) + +/* A structure that represents a node in the DDG. */ +struct ddg_node +{ + /* Each node has a unique CUID index. These indices increase monotonically + (according to the order of the corresponding INSN in the BB), starting + from 0 with no gaps. */ + int cuid; + + /* The insn represented by the node. */ + rtx_insn *insn; + + /* A note preceding INSN (or INSN itself), such that all insns linked + from FIRST_NOTE until INSN (inclusive of both) are moved together + when reordering the insns. This takes care of notes that should + continue to precede INSN. */ + rtx_insn *first_note; + + /* Incoming and outgoing dependency edges. */ + ddg_edge_ptr in; + ddg_edge_ptr out; + + /* Each bit corresponds to a ddg_node according to its cuid, and is + set iff the node is a successor/predecessor of "this" node. */ + sbitmap successors; + sbitmap predecessors; + + /* Temporary array used for Floyd-Warshall algorithm to find + scc recurrence length. */ + int *max_dist; + + /* For general use by algorithms manipulating the ddg. */ + union { + int count; + void *info; + } aux; +}; + +/* A structure that represents an edge in the DDG. */ +struct ddg_edge +{ + /* The source and destination nodes of the dependency edge. */ + ddg_node_ptr src; + ddg_node_ptr dest; + + /* TRUE, OUTPUT or ANTI dependency. */ + dep_type type; + + /* REG or MEM dependency. 
*/ + dep_data_type data_type; + + /* Latency of the dependency. */ + int latency; + + /* The distance: number of loop iterations the dependency crosses. */ + int distance; + + /* The following two fields are used to form a linked list of the in/out + going edges to/from each node. */ + ddg_edge_ptr next_in; + ddg_edge_ptr next_out; + + /* Is true when edge is already in scc. */ + bool in_scc; +}; + +/* This structure holds the Data Dependence Graph for a basic block. */ +struct ddg +{ + /* The basic block for which this DDG is built. */ + basic_block bb; + + /* Number of instructions in the basic block. */ + int num_nodes; + + /* Number of load/store instructions in the BB - statistics. */ + int num_loads; + int num_stores; + + /* This array holds the nodes in the graph; it is indexed by the node + cuid, which follows the order of the instructions in the BB. */ + ddg_node_ptr nodes; + + /* The branch closing the loop. */ + ddg_node_ptr closing_branch; + + /* Build dependence edges for closing_branch, when set. In certain cases, + the closing branch can be dealt with separately from the insns of the + loop, and then no such deps are needed. */ + int closing_branch_deps; + + /* Array and number of backarcs (edges with distance > 0) in the DDG. */ + int num_backarcs; + ddg_edge_ptr *backarcs; +}; + + +/* Holds information on an SCC (Strongly Connected Component) of the DDG. */ +struct ddg_scc +{ + /* A bitmap that represents the nodes of the DDG that are in the SCC. */ + sbitmap nodes; + + /* Array and number of backarcs (edges with distance > 0) in the SCC. */ + ddg_edge_ptr *backarcs; + int num_backarcs; + + /* The maximum of (total_latency/total_distance) over all cycles in SCC. */ + int recurrence_length; +}; + +/* This structure holds the SCCs of the DDG. */ +struct ddg_all_sccs +{ + /* Array that holds the SCCs in the DDG, and their number. 
*/ + ddg_scc_ptr *sccs; + int num_sccs; + + ddg_ptr ddg; +}; + + +ddg_ptr create_ddg (basic_block, int closing_branch_deps); +void free_ddg (ddg_ptr); + +void print_ddg (FILE *, ddg_ptr); +void vcg_print_ddg (FILE *, ddg_ptr); +void print_ddg_edge (FILE *, ddg_edge_ptr); +void print_sccs (FILE *, ddg_all_sccs_ptr, ddg_ptr); + +ddg_node_ptr get_node_of_insn (ddg_ptr, rtx_insn *); + +void find_successors (sbitmap result, ddg_ptr, sbitmap); +void find_predecessors (sbitmap result, ddg_ptr, sbitmap); + +ddg_all_sccs_ptr create_ddg_all_sccs (ddg_ptr); +void free_ddg_all_sccs (ddg_all_sccs_ptr); + +int find_nodes_on_paths (sbitmap result, ddg_ptr, sbitmap from, sbitmap to); + +bool autoinc_var_is_used_p (rtx_insn *, rtx_insn *); + +#endif /* GCC_DDG_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/debug.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/debug.h new file mode 100644 index 0000000..17a7e48 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/debug.h @@ -0,0 +1,282 @@ +/* Debug hooks for GCC. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING3. If not see + . */ + +#ifndef GCC_DEBUG_H +#define GCC_DEBUG_H + +/* This structure contains hooks for the debug information output + functions, accessed through the global instance debug_hooks set in + toplev.cc according to command line options. 
*/ +/* WARNING: Do not add new debug hook targets - DWARF will be the only + way to speak debug to the middle-end once we are able to get rid of + the remaining targets. If you need alternate output formats instead + generate them off the DWARF representation. */ +struct gcc_debug_hooks +{ + /* Initialize debug output. MAIN_FILENAME is the name of the main + input file. */ + void (* init) (const char *main_filename); + + /* Output debug symbols. */ + void (* finish) (const char *main_filename); + + /* Run cleanups necessary after early debug generation. */ + void (* early_finish) (const char *main_filename); + + /* Called from cgraph_optimize before starting to assemble + functions/variables/toplevel asms. */ + void (* assembly_start) (void); + + /* Macro defined on line LINE with name and expansion TEXT. */ + void (* define) (unsigned int line, const char *text); + + /* MACRO undefined on line LINE. */ + void (* undef) (unsigned int line, const char *macro); + + /* Record the beginning of a new source file FILE from LINE number + in the previous one. */ + void (* start_source_file) (unsigned int line, const char *file); + + /* Record the resumption of a source file. LINE is the line number + in the source file we are returning to. */ + void (* end_source_file) (unsigned int line); + + /* Record the beginning of block N, counting from 1 and not + including the function-scope block, at LINE. */ + void (* begin_block) (unsigned int line, unsigned int n); + + /* Record the end of a block. Arguments as for begin_block. */ + void (* end_block) (unsigned int line, unsigned int n); + + /* Returns nonzero if it is appropriate not to emit any debugging + information for BLOCK, because it doesn't contain any + instructions. This may not be the case for blocks containing + nested functions, since we may actually call such a function even + though the BLOCK information is messed up. Defaults to true. 
*/ + bool (* ignore_block) (const_tree); + + /* Record a source file location at (FILE, LINE, COLUMN, DISCRIMINATOR). */ + void (* source_line) (unsigned int line, unsigned int column, + const char *file, int discriminator, bool is_stmt); + + /* Record a source file location for a DECL_IGNORED_P function. */ + void (* set_ignored_loc) (unsigned int line, unsigned int column, + const char *file); + + /* Called at start of prologue code. LINE is the first line in the + function. */ + void (* begin_prologue) (unsigned int line, unsigned int column, + const char *file); + + /* Called at end of prologue code. LINE is the first line in the + function. */ + void (* end_prologue) (unsigned int line, const char *file); + + /* Called at beginning of epilogue code. */ + void (* begin_epilogue) (unsigned int line, const char *file); + + /* Record end of epilogue code. */ + void (* end_epilogue) (unsigned int line, const char *file); + + /* Called at start of function DECL, before it is declared. */ + void (* begin_function) (tree decl); + + /* Record end of function. LINE is highest line number in function. */ + void (* end_function) (unsigned int line); + + /* Register UNIT as the main translation unit. Called from front-ends when + they create their main translation unit. */ + void (* register_main_translation_unit) (tree); + + /* Debug information for a function DECL. This might include the + function name (a symbol), its parameters, and the block that + makes up the function's body, and the local variables of the + function. + + This is only called for FUNCTION_DECLs. It is part of the late + debug pass and is called from rest_of_handle_final. + + Location information is available at this point. + + See the documentation for early_global_decl and late_global_decl + for other entry points into the debugging back-ends for DECLs. */ + void (* function_decl) (tree decl); + + /* Debug information for a global DECL. 
Called from the parser + after the parsing process has finished. + + This gets called for both variables and functions. + + Location information is not available at this point, but it is a + good probe point to get access to symbols before they get + optimized away. + + This hook may be called on VAR_DECLs or FUNCTION_DECLs. It is up + to the hook to use what it needs. */ + void (* early_global_decl) (tree decl); + + /* Augment debug information generated by early_global_decl with + more complete debug info (if applicable). Called from toplev.cc + after the compilation proper has finished and cgraph information + is available. + + This gets called for both variables and functions. + + Location information is usually available at this point, unless + the hook is being called for a decl that has been optimized away. + + This hook may be called on VAR_DECLs or FUNCTION_DECLs. It is up + to the hook to use what it needs. */ + void (* late_global_decl) (tree decl); + + /* Debug information for a type DECL. Called from toplev.cc after + compilation proper, also from various language front ends to + record built-in types. The second argument is properly a + boolean, which indicates whether or not the type is a "local" + type as determined by the language. (It's not a boolean for + legacy reasons.) */ + void (* type_decl) (tree decl, int local); + + /* Debug information for imported modules and declarations. */ + void (* imported_module_or_decl) (tree decl, tree name, + tree context, bool child, + bool implicit); + + /* Return true if a DIE for the tree is available and return a symbol + and offset that can be used to refer to it externally. */ + bool (* die_ref_for_decl) (tree, const char **, unsigned HOST_WIDE_INT *); + + /* Early debug information for the tree is available at symbol plus + offset externally. 
*/ + void (* register_external_die) (tree, const char *, unsigned HOST_WIDE_INT); + + /* DECL is an inline function, whose body is present, but which is + not being output at this point. */ + void (* deferred_inline_function) (tree decl); + + /* DECL is an inline function which is about to be emitted out of + line. The hook is useful to, e.g., emit abstract debug info for + the inline before it gets mangled by optimization. */ + void (* outlining_inline_function) (tree decl); + + /* Called from final_scan_insn for any CODE_LABEL insn whose + LABEL_NAME is non-null. */ + void (* label) (rtx_code_label *); + + /* Called after the start and before the end of writing a PCH file. + The parameter is 0 if after the start, 1 if before the end. */ + void (* handle_pch) (unsigned int); + + /* Called from final_scan_insn for any NOTE_INSN_VAR_LOCATION note. */ + void (* var_location) (rtx_insn *); + + /* Called from final_scan_insn for any NOTE_INSN_INLINE_ENTRY note. */ + void (* inline_entry) (tree block); + + /* Called from finalize_size_functions for size functions so that their body + can be encoded in the debug info to describe the layout of variable-length + structures. */ + void (* size_function) (tree decl); + + /* Called from final_scan_insn if there is a switch between hot and cold + text sections. */ + void (* switch_text_section) (void); + + /* Called from grokdeclarator. Replaces the anonymous name with the + type name. */ + void (* set_name) (tree, tree); + + /* This is 1 if the debug writer wants to see start and end commands for the + main source files, and 0 otherwise. */ + int start_end_main_source_file; + + /* The type of symtab field used by these debug hooks. This is one + of the TYPE_SYMTAB_IS_xxx values defined in tree.h. */ + int tree_type_symtab_field; +}; + +extern const struct gcc_debug_hooks *debug_hooks; + +/* The do-nothing hooks. 
*/ +extern void debug_nothing_void (void); +extern void debug_nothing_charstar (const char *); +extern void debug_nothing_int_int_charstar (unsigned int, unsigned int, + const char *); +extern void debug_nothing_int_charstar (unsigned int, const char *); +extern void debug_nothing_int_int_charstar_int_bool (unsigned int, + unsigned int, + const char *, + int, bool); +extern void debug_nothing_int (unsigned int); +extern void debug_nothing_int_int (unsigned int, unsigned int); +extern void debug_nothing_tree (tree); +extern void debug_nothing_tree_tree (tree, tree); +extern void debug_nothing_tree_int (tree, int); +extern void debug_nothing_tree_tree_tree_bool_bool (tree, tree, tree, + bool, bool); +extern bool debug_true_const_tree (const_tree); +extern void debug_nothing_rtx_insn (rtx_insn *); +extern void debug_nothing_rtx_code_label (rtx_code_label *); +extern bool debug_false_tree_charstarstar_uhwistar (tree, const char **, + unsigned HOST_WIDE_INT *); +extern void debug_nothing_tree_charstar_uhwi (tree, const char *, + unsigned HOST_WIDE_INT); + +/* Hooks for various debug formats. */ +extern const struct gcc_debug_hooks do_nothing_debug_hooks; +extern const struct gcc_debug_hooks dbx_debug_hooks; +extern const struct gcc_debug_hooks xcoff_debug_hooks; +extern const struct gcc_debug_hooks dwarf2_debug_hooks; +extern const struct gcc_debug_hooks dwarf2_lineno_debug_hooks; +extern const struct gcc_debug_hooks vmsdbg_debug_hooks; + +/* Dwarf2 frame information. 
*/ + +extern void dwarf2out_begin_prologue (unsigned int, unsigned int, + const char *); +extern void dwarf2out_vms_end_prologue (unsigned int, const char *); +extern void dwarf2out_vms_begin_epilogue (unsigned int, const char *); +extern void dwarf2out_end_epilogue (unsigned int, const char *); +extern void dwarf2out_frame_finish (void); +extern bool dwarf2out_do_eh_frame (void); +extern bool dwarf2out_do_frame (void); +extern bool dwarf2out_do_cfi_asm (void); +extern void dwarf2out_switch_text_section (void); +extern bool dwarf2out_default_as_loc_support (void); +extern bool dwarf2out_default_as_locview_support (void); + +/* For -fdump-go-spec. */ + +extern const struct gcc_debug_hooks * +dump_go_spec_init (const char *, const struct gcc_debug_hooks *); + +/* Instance discriminator mapping table. See final.cc. */ +typedef hash_map decl_to_instance_map_t; +extern decl_to_instance_map_t *decl_to_instance_map; + +/* Allocate decl_to_instance_map with COUNT slots to begin wtih, if it + * hasn't been allocated yet. */ + +static inline decl_to_instance_map_t * +maybe_create_decl_to_instance_map (int count = 13) +{ + if (!decl_to_instance_map) + decl_to_instance_map = new decl_to_instance_map_t (count); + return decl_to_instance_map; +} + +#endif /* !GCC_DEBUG_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/defaults.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/defaults.h new file mode 100644 index 0000000..7c7a80e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/defaults.h @@ -0,0 +1,1458 @@ +/* Definitions of various defaults for tm.h macros. + Copyright (C) 1992-2022 Free Software Foundation, Inc. + Contributed by Ron Guilmette (rfg@monkeys.com) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef GCC_DEFAULTS_H +#define GCC_DEFAULTS_H + +/* How to start an assembler comment. */ +#ifndef ASM_COMMENT_START +#define ASM_COMMENT_START ";#" +#endif + +/* Store in OUTPUT a string (made with alloca) containing an + assembler-name for a local static variable or function named NAME. + LABELNO is an integer which is different for each call. */ + +#ifndef ASM_PN_FORMAT +# ifndef NO_DOT_IN_LABEL +# define ASM_PN_FORMAT "%s.%lu" +# else +# ifndef NO_DOLLAR_IN_LABEL +# define ASM_PN_FORMAT "%s$%lu" +# else +# define ASM_PN_FORMAT "__%s_%lu" +# endif +# endif +#endif /* ! ASM_PN_FORMAT */ + +#ifndef ASM_FORMAT_PRIVATE_NAME +# define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \ + do { const char *const name_ = (NAME); \ + char *const output_ = (OUTPUT) = \ + (char *) alloca (strlen (name_) + 32); \ + sprintf (output_, ASM_PN_FORMAT, name_, (unsigned long)(LABELNO)); \ + } while (0) +#endif + +/* Choose a reasonable default for ASM_OUTPUT_ASCII. 
*/ + +#ifndef ASM_OUTPUT_ASCII +#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \ + do { \ + FILE *_my_file = (MYFILE); \ + const unsigned char *_hide_p = (const unsigned char *) (MYSTRING); \ + int _hide_thissize = (MYLENGTH); \ + { \ + const unsigned char *p = _hide_p; \ + int thissize = _hide_thissize; \ + int i; \ + fprintf (_my_file, "\t.ascii \""); \ + \ + for (i = 0; i < thissize; i++) \ + { \ + int c = p[i]; \ + if (c == '\"' || c == '\\') \ + putc ('\\', _my_file); \ + if (ISPRINT (c)) \ + putc (c, _my_file); \ + else \ + { \ + fprintf (_my_file, "\\%o", c); \ + /* After an octal-escape, if a digit follows, \ + terminate one string constant and start another. \ + The VAX assembler fails to stop reading the escape \ + after three digits, so this is the only way we \ + can get it to parse the data properly. */ \ + if (i < thissize - 1 && ISDIGIT (p[i + 1])) \ + fprintf (_my_file, "\"\n\t.ascii \""); \ + } \ + } \ + fprintf (_my_file, "\"\n"); \ + } \ + } \ + while (0) +#endif + +/* This is how we tell the assembler to equate two values. */ +#ifdef SET_ASM_OP +#ifndef ASM_OUTPUT_DEF +#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ + do { fprintf ((FILE), "%s", SET_ASM_OP); \ + assemble_name (FILE, LABEL1); \ + fprintf (FILE, ","); \ + assemble_name (FILE, LABEL2); \ + fprintf (FILE, "\n"); \ + } while (0) +#endif +#endif + +#ifndef IFUNC_ASM_TYPE +#define IFUNC_ASM_TYPE "gnu_indirect_function" +#endif + +#ifndef TLS_COMMON_ASM_OP +#define TLS_COMMON_ASM_OP ".tls_common" +#endif + +#if defined (HAVE_AS_TLS) && !defined (ASM_OUTPUT_TLS_COMMON) +#define ASM_OUTPUT_TLS_COMMON(FILE, DECL, NAME, SIZE) \ + do \ + { \ + fprintf ((FILE), "\t%s\t", TLS_COMMON_ASM_OP); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \ + (SIZE), DECL_ALIGN (DECL) / BITS_PER_UNIT); \ + } \ + while (0) +#endif + +/* Decide whether to defer emitting the assembler output for an equate + of two values. The default is to not defer output. 
*/ +#ifndef TARGET_DEFERRED_OUTPUT_DEFS +#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) false +#endif + +/* This is how to output the definition of a user-level label named + NAME, such as the label on variable NAME. */ + +#ifndef ASM_OUTPUT_LABEL +#define ASM_OUTPUT_LABEL(FILE,NAME) \ + do { \ + assemble_name ((FILE), (NAME)); \ + fputs (":\n", (FILE)); \ + } while (0) +#endif + +/* This is how to output the definition of a user-level label named + NAME, such as the label on a function. */ + +#ifndef ASM_OUTPUT_FUNCTION_LABEL +#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \ + ASM_OUTPUT_LABEL ((FILE), (NAME)) +#endif + +/* Output the definition of a compiler-generated label named NAME. */ +#ifndef ASM_OUTPUT_INTERNAL_LABEL +#define ASM_OUTPUT_INTERNAL_LABEL(FILE,NAME) \ + do { \ + assemble_name_raw ((FILE), (NAME)); \ + fputs (":\n", (FILE)); \ + } while (0) +#endif + +/* This is how to output a reference to a user-level label named NAME. */ + +#ifndef ASM_OUTPUT_LABELREF +#define ASM_OUTPUT_LABELREF(FILE,NAME) \ + do { \ + fputs (user_label_prefix, (FILE)); \ + fputs ((NAME), (FILE)); \ + } while (0) +#endif + +/* Allow target to print debug info labels specially. This is useful for + VLIW targets, since debug info labels should go into the middle of + instruction bundles instead of breaking them. */ + +#ifndef ASM_OUTPUT_DEBUG_LABEL +#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ + (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM) +#endif + +/* This is how we tell the assembler that a symbol is weak. */ +#ifndef ASM_OUTPUT_WEAK_ALIAS +#if defined (ASM_WEAKEN_LABEL) && defined (ASM_OUTPUT_DEF) +#define ASM_OUTPUT_WEAK_ALIAS(STREAM, NAME, VALUE) \ + do \ + { \ + ASM_WEAKEN_LABEL (STREAM, NAME); \ + if (VALUE) \ + ASM_OUTPUT_DEF (STREAM, NAME, VALUE); \ + } \ + while (0) +#endif +#endif + +/* This is how we tell the assembler that a symbol is a weak alias to + another symbol that doesn't require the other symbol to be defined. 
+ Uses of the former will turn into weak uses of the latter, i.e., + uses that, in case the latter is undefined, will not cause errors, + and will add it to the symbol table as weak undefined. However, if + the latter is referenced directly, a strong reference prevails. */ +#ifndef ASM_OUTPUT_WEAKREF +#if defined HAVE_GAS_WEAKREF +#define ASM_OUTPUT_WEAKREF(FILE, DECL, NAME, VALUE) \ + do \ + { \ + fprintf ((FILE), "\t.weakref\t"); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), ","); \ + assemble_name ((FILE), (VALUE)); \ + fprintf ((FILE), "\n"); \ + } \ + while (0) +#endif +#endif + +/* How to emit a .type directive. */ +#ifndef ASM_OUTPUT_TYPE_DIRECTIVE +#if defined TYPE_ASM_OP && defined TYPE_OPERAND_FMT +#define ASM_OUTPUT_TYPE_DIRECTIVE(STREAM, NAME, TYPE) \ + do \ + { \ + fputs (TYPE_ASM_OP, STREAM); \ + assemble_name (STREAM, NAME); \ + fputs (", ", STREAM); \ + fprintf (STREAM, TYPE_OPERAND_FMT, TYPE); \ + putc ('\n', STREAM); \ + } \ + while (0) +#endif +#endif + +/* How to emit a .size directive. */ +#ifndef ASM_OUTPUT_SIZE_DIRECTIVE +#ifdef SIZE_ASM_OP +#define ASM_OUTPUT_SIZE_DIRECTIVE(STREAM, NAME, SIZE) \ + do \ + { \ + HOST_WIDE_INT size_ = (SIZE); \ + fputs (SIZE_ASM_OP, STREAM); \ + assemble_name (STREAM, NAME); \ + fprintf (STREAM, ", " HOST_WIDE_INT_PRINT_DEC "\n", size_); \ + } \ + while (0) + +#define ASM_OUTPUT_MEASURED_SIZE(STREAM, NAME) \ + do \ + { \ + fputs (SIZE_ASM_OP, STREAM); \ + assemble_name (STREAM, NAME); \ + fputs (", .-", STREAM); \ + assemble_name (STREAM, NAME); \ + putc ('\n', STREAM); \ + } \ + while (0) + +#endif +#endif + +/* This determines whether or not we support weak symbols. SUPPORTS_WEAK + must be a preprocessor constant. */ +#ifndef SUPPORTS_WEAK +#if defined (ASM_WEAKEN_LABEL) || defined (ASM_WEAKEN_DECL) +#define SUPPORTS_WEAK 1 +#else +#define SUPPORTS_WEAK 0 +#endif +#endif + +/* This determines whether or not we support weak symbols during target + code generation. 
TARGET_SUPPORTS_WEAK can be any valid C expression. */ +#ifndef TARGET_SUPPORTS_WEAK +#define TARGET_SUPPORTS_WEAK (SUPPORTS_WEAK) +#endif + +/* This determines whether or not we support the discriminator + attribute in the .loc directive. */ +#ifndef SUPPORTS_DISCRIMINATOR +#ifdef HAVE_GAS_DISCRIMINATOR +#define SUPPORTS_DISCRIMINATOR 1 +#else +#define SUPPORTS_DISCRIMINATOR 0 +#endif +#endif + +/* This determines whether or not we support marking sections with + SHF_GNU_RETAIN flag. Also require .init_array/.fini_array section + for constructors and destructors. */ +#ifndef SUPPORTS_SHF_GNU_RETAIN +#if HAVE_GAS_SHF_GNU_RETAIN && HAVE_INITFINI_ARRAY_SUPPORT +#define SUPPORTS_SHF_GNU_RETAIN 1 +#else +#define SUPPORTS_SHF_GNU_RETAIN 0 +#endif +#endif + +/* This determines whether or not we support link-once semantics. */ +#ifndef SUPPORTS_ONE_ONLY +#ifdef MAKE_DECL_ONE_ONLY +#define SUPPORTS_ONE_ONLY 1 +#else +#define SUPPORTS_ONE_ONLY 0 +#endif +#endif + +/* This determines whether weak symbols must be left out of a static + archive's table of contents. Defining this macro to be nonzero has + the consequence that certain symbols will not be made weak that + otherwise would be. The C++ ABI requires this macro to be zero; + see the documentation. */ +#ifndef TARGET_WEAK_NOT_IN_ARCHIVE_TOC +#define TARGET_WEAK_NOT_IN_ARCHIVE_TOC 0 +#endif + +/* This determines whether or not we need linkonce unwind information. */ +#ifndef TARGET_USES_WEAK_UNWIND_INFO +#define TARGET_USES_WEAK_UNWIND_INFO 0 +#endif + +/* By default, there is no prefix on user-defined symbols. */ +#ifndef USER_LABEL_PREFIX +#define USER_LABEL_PREFIX "" +#endif + +/* If the target supports weak symbols, define TARGET_ATTRIBUTE_WEAK to + provide a weak attribute. Else define it to nothing. + + This would normally belong in ansidecl.h, but SUPPORTS_WEAK is + not available at that time. + + Note, this is only for use by target files which we know are to be + compiled by GCC. 
*/ +#ifndef TARGET_ATTRIBUTE_WEAK +# if SUPPORTS_WEAK +# define TARGET_ATTRIBUTE_WEAK __attribute__ ((weak)) +# else +# define TARGET_ATTRIBUTE_WEAK +# endif +#endif + +/* By default we can assume that all global symbols are in one namespace, + across all shared libraries. */ +#ifndef MULTIPLE_SYMBOL_SPACES +# define MULTIPLE_SYMBOL_SPACES 0 +#endif + +/* If the target supports init_priority C++ attribute, give + SUPPORTS_INIT_PRIORITY a nonzero value. */ +#ifndef SUPPORTS_INIT_PRIORITY +#define SUPPORTS_INIT_PRIORITY 1 +#endif /* SUPPORTS_INIT_PRIORITY */ + +/* If we have a definition of INCOMING_RETURN_ADDR_RTX, assume that + the rest of the DWARF 2 frame unwind support is also provided. */ +#if !defined (DWARF2_UNWIND_INFO) && defined (INCOMING_RETURN_ADDR_RTX) +#define DWARF2_UNWIND_INFO 1 +#endif + +/* If we have named sections, and we're using crtstuff to run ctors, + use them for registering eh frame information. */ +#if defined (TARGET_ASM_NAMED_SECTION) && DWARF2_UNWIND_INFO \ + && !defined (EH_FRAME_THROUGH_COLLECT2) +#ifndef EH_FRAME_SECTION_NAME +#define EH_FRAME_SECTION_NAME ".eh_frame" +#endif +#endif + +/* On many systems, different EH table encodings are used under + difference circumstances. Some will require runtime relocations; + some will not. For those that do not require runtime relocations, + we would like to make the table read-only. However, since the + read-only tables may need to be combined with read-write tables + that do require runtime relocation, it is not safe to make the + tables read-only unless the linker will merge read-only and + read-write sections into a single read-write section. If your + linker does not have this ability, but your system is such that no + encoding used with non-PIC code will ever require a runtime + relocation, then you can define EH_TABLES_CAN_BE_READ_ONLY to 1 in + your target configuration file. 
*/ +#ifndef EH_TABLES_CAN_BE_READ_ONLY +#ifdef HAVE_LD_RO_RW_SECTION_MIXING +#define EH_TABLES_CAN_BE_READ_ONLY 1 +#else +#define EH_TABLES_CAN_BE_READ_ONLY 0 +#endif +#endif + +/* Provide defaults for stuff that may not be defined when using + sjlj exceptions. */ +#ifndef EH_RETURN_DATA_REGNO +#define EH_RETURN_DATA_REGNO(N) INVALID_REGNUM +#endif + +/* Offset between the eh handler address and entry in eh tables. */ +#ifndef RETURN_ADDR_OFFSET +#define RETURN_ADDR_OFFSET 0 +#endif + +#ifndef MASK_RETURN_ADDR +#define MASK_RETURN_ADDR NULL_RTX +#endif + +/* Number of hardware registers that go into the DWARF-2 unwind info. + If not defined, equals FIRST_PSEUDO_REGISTER */ + +#ifndef DWARF_FRAME_REGISTERS +#define DWARF_FRAME_REGISTERS FIRST_PSEUDO_REGISTER +#endif + +/* Offsets recorded in opcodes are a multiple of this alignment factor. */ +#ifndef DWARF_CIE_DATA_ALIGNMENT +#ifdef STACK_GROWS_DOWNWARD +#define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD)) +#else +#define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD) +#endif +#endif + +/* The DWARF 2 CFA column which tracks the return address. Normally this + is the column for PC, or the first column after all of the hard + registers. */ +#ifndef DWARF_FRAME_RETURN_COLUMN +#ifdef PC_REGNUM +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM) +#else +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS +#endif +#endif + +/* How to renumber registers for dbx and gdb. If not defined, assume + no renumbering is necessary. */ + +#ifndef DBX_REGISTER_NUMBER +#define DBX_REGISTER_NUMBER(REGNO) (REGNO) +#endif + +/* The mapping from gcc register number to DWARF 2 CFA column number. + By default, we just provide columns for all registers. */ +#ifndef DWARF_FRAME_REGNUM +#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG) +#endif + +/* The mapping from dwarf CFA reg number to internal dwarf reg numbers. 
*/ +#ifndef DWARF_REG_TO_UNWIND_COLUMN +#define DWARF_REG_TO_UNWIND_COLUMN(REGNO) (REGNO) +#endif + +/* Map register numbers held in the call frame info that gcc has + collected using DWARF_FRAME_REGNUM to those that should be output in + .debug_frame and .eh_frame. */ +#ifndef DWARF2_FRAME_REG_OUT +#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO) +#endif + +/* The size of addresses as they appear in the Dwarf 2 data. + Some architectures use word addresses to refer to code locations, + but Dwarf 2 info always uses byte addresses. On such machines, + Dwarf 2 addresses need to be larger than the architecture's + pointers. */ +#ifndef DWARF2_ADDR_SIZE +#define DWARF2_ADDR_SIZE ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT) +#endif + +/* The size in bytes of a DWARF field indicating an offset or length + relative to a debug info section, specified to be 4 bytes in the + DWARF-2 specification. The SGI/MIPS ABI defines it to be the same + as PTR_SIZE. */ +#ifndef DWARF_OFFSET_SIZE +#define DWARF_OFFSET_SIZE 4 +#endif + +/* The size in bytes of a DWARF 4 type signature. */ +#ifndef DWARF_TYPE_SIGNATURE_SIZE +#define DWARF_TYPE_SIGNATURE_SIZE 8 +#endif + +/* Default sizes for base C types. If the sizes are different for + your target, you should override these values by defining the + appropriate symbols in your tm.h file. */ + +#ifndef BITS_PER_WORD +#define BITS_PER_WORD (BITS_PER_UNIT * UNITS_PER_WORD) +#endif + +#ifndef CHAR_TYPE_SIZE +#define CHAR_TYPE_SIZE BITS_PER_UNIT +#endif + +#ifndef BOOL_TYPE_SIZE +/* `bool' has size and alignment `1', on almost all platforms. 
*/ +#define BOOL_TYPE_SIZE CHAR_TYPE_SIZE +#endif + +#ifndef SHORT_TYPE_SIZE +#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2)) +#endif + +#ifndef INT_TYPE_SIZE +#define INT_TYPE_SIZE BITS_PER_WORD +#endif + +#ifndef LONG_TYPE_SIZE +#define LONG_TYPE_SIZE BITS_PER_WORD +#endif + +#ifndef LONG_LONG_TYPE_SIZE +#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2) +#endif + +#ifndef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE INT_TYPE_SIZE +#endif + +#ifndef FLOAT_TYPE_SIZE +#define FLOAT_TYPE_SIZE BITS_PER_WORD +#endif + +#ifndef DOUBLE_TYPE_SIZE +#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2) +#endif + +#ifndef LONG_DOUBLE_TYPE_SIZE +#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2) +#endif + +#ifndef DECIMAL32_TYPE_SIZE +#define DECIMAL32_TYPE_SIZE 32 +#endif + +#ifndef DECIMAL64_TYPE_SIZE +#define DECIMAL64_TYPE_SIZE 64 +#endif + +#ifndef DECIMAL128_TYPE_SIZE +#define DECIMAL128_TYPE_SIZE 128 +#endif + +#ifndef SHORT_FRACT_TYPE_SIZE +#define SHORT_FRACT_TYPE_SIZE BITS_PER_UNIT +#endif + +#ifndef FRACT_TYPE_SIZE +#define FRACT_TYPE_SIZE (BITS_PER_UNIT * 2) +#endif + +#ifndef LONG_FRACT_TYPE_SIZE +#define LONG_FRACT_TYPE_SIZE (BITS_PER_UNIT * 4) +#endif + +#ifndef LONG_LONG_FRACT_TYPE_SIZE +#define LONG_LONG_FRACT_TYPE_SIZE (BITS_PER_UNIT * 8) +#endif + +#ifndef SHORT_ACCUM_TYPE_SIZE +#define SHORT_ACCUM_TYPE_SIZE (SHORT_FRACT_TYPE_SIZE * 2) +#endif + +#ifndef ACCUM_TYPE_SIZE +#define ACCUM_TYPE_SIZE (FRACT_TYPE_SIZE * 2) +#endif + +#ifndef LONG_ACCUM_TYPE_SIZE +#define LONG_ACCUM_TYPE_SIZE (LONG_FRACT_TYPE_SIZE * 2) +#endif + +#ifndef LONG_LONG_ACCUM_TYPE_SIZE +#define LONG_LONG_ACCUM_TYPE_SIZE (LONG_LONG_FRACT_TYPE_SIZE * 2) +#endif + +/* We let tm.h override the types used here, to handle trivial differences + such as the choice of unsigned int or long unsigned int for size_t. 
+ When machines start needing nontrivial differences in the size type, + it would be best to do something here to figure out automatically + from other information what type to use. */ + +#ifndef SIZE_TYPE +#define SIZE_TYPE "long unsigned int" +#endif + +#ifndef SIZETYPE +#define SIZETYPE SIZE_TYPE +#endif + +#ifndef PID_TYPE +#define PID_TYPE "int" +#endif + +/* If GCC knows the exact uint_least16_t and uint_least32_t types from + , use them for char16_t and char32_t. Otherwise, use + these guesses; getting the wrong type of a given width will not + affect C++ name mangling because in C++ these are distinct types + not typedefs. */ + +#ifndef CHAR8_TYPE +#define CHAR8_TYPE "unsigned char" +#endif + +#ifdef UINT_LEAST16_TYPE +#define CHAR16_TYPE UINT_LEAST16_TYPE +#else +#define CHAR16_TYPE "short unsigned int" +#endif + +#ifdef UINT_LEAST32_TYPE +#define CHAR32_TYPE UINT_LEAST32_TYPE +#else +#define CHAR32_TYPE "unsigned int" +#endif + +#ifndef WCHAR_TYPE +#define WCHAR_TYPE "int" +#endif + +/* WCHAR_TYPE gets overridden by -fshort-wchar. */ +#define MODIFIED_WCHAR_TYPE \ + (flag_short_wchar ? "short unsigned int" : WCHAR_TYPE) + +#ifndef PTRDIFF_TYPE +#define PTRDIFF_TYPE "long int" +#endif + +#ifndef WINT_TYPE +#define WINT_TYPE "unsigned int" +#endif + +#ifndef INTMAX_TYPE +#define INTMAX_TYPE ((INT_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ + ? "int" \ + : ((LONG_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ + ? "long int" \ + : "long long int")) +#endif + +#ifndef UINTMAX_TYPE +#define UINTMAX_TYPE ((INT_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ + ? "unsigned int" \ + : ((LONG_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ + ? "long unsigned int" \ + : "long long unsigned int")) +#endif + + +/* There are no default definitions of these types. 
*/ + +#ifndef SIG_ATOMIC_TYPE +#define SIG_ATOMIC_TYPE ((const char *) NULL) +#endif + +#ifndef INT8_TYPE +#define INT8_TYPE ((const char *) NULL) +#endif + +#ifndef INT16_TYPE +#define INT16_TYPE ((const char *) NULL) +#endif + +#ifndef INT32_TYPE +#define INT32_TYPE ((const char *) NULL) +#endif + +#ifndef INT64_TYPE +#define INT64_TYPE ((const char *) NULL) +#endif + +#ifndef UINT8_TYPE +#define UINT8_TYPE ((const char *) NULL) +#endif + +#ifndef UINT16_TYPE +#define UINT16_TYPE ((const char *) NULL) +#endif + +#ifndef UINT32_TYPE +#define UINT32_TYPE ((const char *) NULL) +#endif + +#ifndef UINT64_TYPE +#define UINT64_TYPE ((const char *) NULL) +#endif + +#ifndef INT_LEAST8_TYPE +#define INT_LEAST8_TYPE ((const char *) NULL) +#endif + +#ifndef INT_LEAST16_TYPE +#define INT_LEAST16_TYPE ((const char *) NULL) +#endif + +#ifndef INT_LEAST32_TYPE +#define INT_LEAST32_TYPE ((const char *) NULL) +#endif + +#ifndef INT_LEAST64_TYPE +#define INT_LEAST64_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_LEAST8_TYPE +#define UINT_LEAST8_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_LEAST16_TYPE +#define UINT_LEAST16_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_LEAST32_TYPE +#define UINT_LEAST32_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_LEAST64_TYPE +#define UINT_LEAST64_TYPE ((const char *) NULL) +#endif + +#ifndef INT_FAST8_TYPE +#define INT_FAST8_TYPE ((const char *) NULL) +#endif + +#ifndef INT_FAST16_TYPE +#define INT_FAST16_TYPE ((const char *) NULL) +#endif + +#ifndef INT_FAST32_TYPE +#define INT_FAST32_TYPE ((const char *) NULL) +#endif + +#ifndef INT_FAST64_TYPE +#define INT_FAST64_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_FAST8_TYPE +#define UINT_FAST8_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_FAST16_TYPE +#define UINT_FAST16_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_FAST32_TYPE +#define UINT_FAST32_TYPE ((const char *) NULL) +#endif + +#ifndef UINT_FAST64_TYPE +#define UINT_FAST64_TYPE ((const char *) NULL) 
+#endif + +#ifndef INTPTR_TYPE +#define INTPTR_TYPE ((const char *) NULL) +#endif + +#ifndef UINTPTR_TYPE +#define UINTPTR_TYPE ((const char *) NULL) +#endif + +/* Width in bits of a pointer. Mind the value of the macro `Pmode'. */ +#ifndef POINTER_SIZE +#define POINTER_SIZE BITS_PER_WORD +#endif +#ifndef POINTER_SIZE_UNITS +#define POINTER_SIZE_UNITS ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT) +#endif + + +#ifndef PIC_OFFSET_TABLE_REGNUM +#define PIC_OFFSET_TABLE_REGNUM INVALID_REGNUM +#endif + +#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED +#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED 0 +#endif + +#ifndef TARGET_DLLIMPORT_DECL_ATTRIBUTES +#define TARGET_DLLIMPORT_DECL_ATTRIBUTES 0 +#endif + +#ifndef TARGET_DECLSPEC +#if TARGET_DLLIMPORT_DECL_ATTRIBUTES +/* If the target supports the "dllimport" attribute, users are + probably used to the "__declspec" syntax. */ +#define TARGET_DECLSPEC 1 +#else +#define TARGET_DECLSPEC 0 +#endif +#endif + +/* By default, the preprocessor should be invoked the same way in C++ + as in C. */ +#ifndef CPLUSPLUS_CPP_SPEC +#ifdef CPP_SPEC +#define CPLUSPLUS_CPP_SPEC CPP_SPEC +#endif +#endif + +#ifndef ACCUMULATE_OUTGOING_ARGS +#define ACCUMULATE_OUTGOING_ARGS 0 +#endif + +/* By default, use the GNU runtime for Objective C. */ +#ifndef NEXT_OBJC_RUNTIME +#define NEXT_OBJC_RUNTIME 0 +#endif + +/* Decide whether a function's arguments should be processed + from first to last or from last to first. + + They should if the stack and args grow in opposite directions, but + only if we have push insns. */ + +#ifdef PUSH_ROUNDING + +#ifndef PUSH_ARGS_REVERSED +#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD) +#define PUSH_ARGS_REVERSED targetm.calls.push_argument (0) +#endif +#endif + +#endif + +#ifndef PUSH_ARGS_REVERSED +#define PUSH_ARGS_REVERSED 0 +#endif + +/* Default value for the alignment (in bits) a C conformant malloc has to + provide. This default is intended to be safe and always correct. 
*/ +#ifndef MALLOC_ABI_ALIGNMENT +#define MALLOC_ABI_ALIGNMENT BITS_PER_WORD +#endif + +/* If PREFERRED_STACK_BOUNDARY is not defined, set it to STACK_BOUNDARY. + STACK_BOUNDARY is required. */ +#ifndef PREFERRED_STACK_BOUNDARY +#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY +#endif + +/* Set INCOMING_STACK_BOUNDARY to PREFERRED_STACK_BOUNDARY if it is not + defined. */ +#ifndef INCOMING_STACK_BOUNDARY +#define INCOMING_STACK_BOUNDARY PREFERRED_STACK_BOUNDARY +#endif + +#ifndef TARGET_DEFAULT_PACK_STRUCT +#define TARGET_DEFAULT_PACK_STRUCT 0 +#endif + +/* By default, the vtable entries are void pointers, the so the alignment + is the same as pointer alignment. The value of this macro specifies + the alignment of the vtable entry in bits. It should be defined only + when special alignment is necessary. */ +#ifndef TARGET_VTABLE_ENTRY_ALIGN +#define TARGET_VTABLE_ENTRY_ALIGN POINTER_SIZE +#endif + +/* There are a few non-descriptor entries in the vtable at offsets below + zero. If these entries must be padded (say, to preserve the alignment + specified by TARGET_VTABLE_ENTRY_ALIGN), set this to the number of + words in each data entry. */ +#ifndef TARGET_VTABLE_DATA_ENTRY_DISTANCE +#define TARGET_VTABLE_DATA_ENTRY_DISTANCE 1 +#endif + +/* Decide whether it is safe to use a local alias for a virtual function + when constructing thunks. */ +#ifndef TARGET_USE_LOCAL_THUNK_ALIAS_P +#ifdef ASM_OUTPUT_DEF +#define TARGET_USE_LOCAL_THUNK_ALIAS_P(DECL) 1 +#else +#define TARGET_USE_LOCAL_THUNK_ALIAS_P(DECL) 0 +#endif +#endif + +/* Decide whether target supports aliases. */ +#ifndef TARGET_SUPPORTS_ALIASES +#ifdef ASM_OUTPUT_DEF +#define TARGET_SUPPORTS_ALIASES 1 +#else +#define TARGET_SUPPORTS_ALIASES 0 +#endif +#endif + +/* Select a format to encode pointers in exception handling data. We + prefer those that result in fewer dynamic relocations. Assume no + special support here and encode direct references. 
*/ +#ifndef ASM_PREFERRED_EH_DATA_FORMAT +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) DW_EH_PE_absptr +#endif + +/* By default, the C++ compiler will use the lowest bit of the pointer + to function to indicate a pointer-to-member-function points to a + virtual member function. However, if FUNCTION_BOUNDARY indicates + function addresses aren't always even, the lowest bit of the delta + field will be used. */ +#ifndef TARGET_PTRMEMFUNC_VBIT_LOCATION +#define TARGET_PTRMEMFUNC_VBIT_LOCATION \ + (FUNCTION_BOUNDARY >= 2 * BITS_PER_UNIT \ + ? ptrmemfunc_vbit_in_pfn : ptrmemfunc_vbit_in_delta) +#endif + +#ifndef DEFAULT_GDB_EXTENSIONS +#define DEFAULT_GDB_EXTENSIONS 1 +#endif + +/* Default to DWARF2_DEBUGGING_INFO. Legacy targets can choose different + by defining PREFERRED_DEBUGGING_TYPE. */ +#ifndef PREFERRED_DEBUGGING_TYPE +#if defined DWARF2_DEBUGGING_INFO || defined DWARF2_LINENO_DEBUGGING_INFO +#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG +#else +#error You must define PREFERRED_DEBUGGING_TYPE if DWARF is not supported +#endif +#endif + +#ifndef FLOAT_LIB_COMPARE_RETURNS_BOOL +#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) false +#endif + +/* True if the targets integer-comparison functions return { 0, 1, 2 + } to indicate { <, ==, > }. False if { -1, 0, 1 } is used + instead. The libgcc routines are biased. */ +#ifndef TARGET_LIB_INT_CMP_BIASED +#define TARGET_LIB_INT_CMP_BIASED (true) +#endif + +/* If FLOAT_WORDS_BIG_ENDIAN is not defined in the header files, + then the word-endianness is the same as for integers. 
*/ +#ifndef FLOAT_WORDS_BIG_ENDIAN +#define FLOAT_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN +#endif + +#ifndef REG_WORDS_BIG_ENDIAN +#define REG_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN +#endif + + +#ifndef TARGET_DEC_EVAL_METHOD +#define TARGET_DEC_EVAL_METHOD 2 +#endif + +#ifndef HAS_LONG_COND_BRANCH +#define HAS_LONG_COND_BRANCH 0 +#endif + +#ifndef HAS_LONG_UNCOND_BRANCH +#define HAS_LONG_UNCOND_BRANCH 0 +#endif + +/* Determine whether __cxa_atexit, rather than atexit, is used to + register C++ destructors for local statics and global objects. */ +#ifndef DEFAULT_USE_CXA_ATEXIT +#define DEFAULT_USE_CXA_ATEXIT 0 +#endif + +#if GCC_VERSION >= 3000 && defined IN_GCC +/* These old constraint macros shouldn't appear anywhere in a + configuration using MD constraint definitions. */ +#endif + +/* Determin whether the target runtime library is Bionic */ +#ifndef TARGET_HAS_BIONIC +#define TARGET_HAS_BIONIC 0 +#endif + +/* Indicate that CLZ and CTZ are undefined at zero. */ +#ifndef CLZ_DEFINED_VALUE_AT_ZERO +#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0 +#endif +#ifndef CTZ_DEFINED_VALUE_AT_ZERO +#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0 +#endif + +/* Provide a default value for STORE_FLAG_VALUE. */ +#ifndef STORE_FLAG_VALUE +#define STORE_FLAG_VALUE 1 +#endif + +/* This macro is used to determine what the largest unit size that + move_by_pieces can use is. */ + +/* MOVE_MAX_PIECES is the number of bytes at a time which we can + move efficiently, as opposed to MOVE_MAX which is the maximum + number of bytes we can move with a single instruction. */ + +#ifndef MOVE_MAX_PIECES +#define MOVE_MAX_PIECES MOVE_MAX +#endif + +/* STORE_MAX_PIECES is the number of bytes at a time that we can + store efficiently. Due to internal GCC limitations, this is + MOVE_MAX_PIECES limited by the number of bytes GCC can represent + for an immediate constant. 
*/ + +#ifndef STORE_MAX_PIECES +#define STORE_MAX_PIECES MIN (MOVE_MAX_PIECES, 2 * sizeof (HOST_WIDE_INT)) +#endif + +/* Likewise for block comparisons. */ +#ifndef COMPARE_MAX_PIECES +#define COMPARE_MAX_PIECES MOVE_MAX_PIECES +#endif + +#ifndef MAX_MOVE_MAX +#define MAX_MOVE_MAX MOVE_MAX +#endif + +#ifndef MIN_UNITS_PER_WORD +#define MIN_UNITS_PER_WORD UNITS_PER_WORD +#endif + +#ifndef MAX_BITS_PER_WORD +#define MAX_BITS_PER_WORD BITS_PER_WORD +#endif + +#ifndef STACK_POINTER_OFFSET +#define STACK_POINTER_OFFSET 0 +#endif + +#ifndef LOCAL_REGNO +#define LOCAL_REGNO(REGNO) 0 +#endif + +#ifndef HONOR_REG_ALLOC_ORDER +#define HONOR_REG_ALLOC_ORDER 0 +#endif + +/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, + the stack pointer does not matter. The value is tested only in + functions that have frame pointers. */ +#ifndef EXIT_IGNORE_STACK +#define EXIT_IGNORE_STACK 0 +#endif + +/* Assume that case vectors are not pc-relative. */ +#ifndef CASE_VECTOR_PC_RELATIVE +#define CASE_VECTOR_PC_RELATIVE 0 +#endif + +/* Force minimum alignment to be able to use the least significant bits + for distinguishing descriptor addresses from code addresses. */ +#define FUNCTION_ALIGNMENT(ALIGN) \ + (lang_hooks.custom_function_descriptors \ + && targetm.calls.custom_function_descriptors > 0 \ + ? MAX ((ALIGN), \ + 2 * targetm.calls.custom_function_descriptors * BITS_PER_UNIT)\ + : (ALIGN)) + +/* Assume that trampolines need function alignment. */ +#ifndef TRAMPOLINE_ALIGNMENT +#define TRAMPOLINE_ALIGNMENT FUNCTION_ALIGNMENT (FUNCTION_BOUNDARY) +#endif + +/* Register mappings for target machines without register windows. 
*/ +#ifndef INCOMING_REGNO +#define INCOMING_REGNO(N) (N) +#endif + +#ifndef OUTGOING_REGNO +#define OUTGOING_REGNO(N) (N) +#endif + +#ifndef SHIFT_COUNT_TRUNCATED +#define SHIFT_COUNT_TRUNCATED 0 +#endif + +#ifndef LEGITIMATE_PIC_OPERAND_P +#define LEGITIMATE_PIC_OPERAND_P(X) 1 +#endif + +#ifndef TARGET_MEM_CONSTRAINT +#define TARGET_MEM_CONSTRAINT 'm' +#endif + +#ifndef REVERSIBLE_CC_MODE +#define REVERSIBLE_CC_MODE(MODE) 0 +#endif + +/* Biggest alignment supported by the object file format of this machine. */ +#ifndef MAX_OFILE_ALIGNMENT +#define MAX_OFILE_ALIGNMENT BIGGEST_ALIGNMENT +#endif + +#ifndef FRAME_GROWS_DOWNWARD +#define FRAME_GROWS_DOWNWARD 0 +#endif + +#ifndef RETURN_ADDR_IN_PREVIOUS_FRAME +#define RETURN_ADDR_IN_PREVIOUS_FRAME 0 +#endif + +/* On most machines, the CFA coincides with the first incoming parm. */ +#ifndef ARG_POINTER_CFA_OFFSET +#define ARG_POINTER_CFA_OFFSET(FNDECL) \ + (FIRST_PARM_OFFSET (FNDECL) + crtl->args.pretend_args_size) +#endif + +/* On most machines, we use the CFA as DW_AT_frame_base. */ +#ifndef CFA_FRAME_BASE_OFFSET +#define CFA_FRAME_BASE_OFFSET(FNDECL) 0 +#endif + +/* The offset from the incoming value of %sp to the top of the stack frame + for the current function. */ +#ifndef INCOMING_FRAME_SP_OFFSET +#define INCOMING_FRAME_SP_OFFSET 0 +#endif + +#ifndef HARD_REGNO_NREGS_HAS_PADDING +#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) 0 +#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) -1 +#endif + +#ifndef OUTGOING_REG_PARM_STACK_SPACE +#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 0 +#endif + +/* MAX_STACK_ALIGNMENT is the maximum stack alignment guaranteed by + the backend. MAX_SUPPORTED_STACK_ALIGNMENT is the maximum best + effort stack alignment supported by the backend. If the backend + supports stack alignment, MAX_SUPPORTED_STACK_ALIGNMENT and + MAX_STACK_ALIGNMENT are the same. Otherwise, the incoming stack + boundary will limit the maximum guaranteed stack alignment. 
*/ +#ifdef MAX_STACK_ALIGNMENT +#define MAX_SUPPORTED_STACK_ALIGNMENT MAX_STACK_ALIGNMENT +#else +#define MAX_STACK_ALIGNMENT STACK_BOUNDARY +#define MAX_SUPPORTED_STACK_ALIGNMENT PREFERRED_STACK_BOUNDARY +#endif + +#define SUPPORTS_STACK_ALIGNMENT (MAX_STACK_ALIGNMENT > STACK_BOUNDARY) + +#ifndef LOCAL_ALIGNMENT +#define LOCAL_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT +#endif + +#ifndef STACK_SLOT_ALIGNMENT +#define STACK_SLOT_ALIGNMENT(TYPE,MODE,ALIGN) \ + ((TYPE) ? LOCAL_ALIGNMENT ((TYPE), (ALIGN)) : (ALIGN)) +#endif + +#ifndef LOCAL_DECL_ALIGNMENT +#define LOCAL_DECL_ALIGNMENT(DECL) \ + LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) +#endif + +#ifndef MINIMUM_ALIGNMENT +#define MINIMUM_ALIGNMENT(EXP,MODE,ALIGN) (ALIGN) +#endif + +/* Alignment value for attribute ((aligned)). */ +#ifndef ATTRIBUTE_ALIGNED_VALUE +#define ATTRIBUTE_ALIGNED_VALUE BIGGEST_ALIGNMENT +#endif + +/* For most ports anything that evaluates to a constant symbolic + or integer value is acceptable as a constant address. */ +#ifndef CONSTANT_ADDRESS_P +#define CONSTANT_ADDRESS_P(X) (CONSTANT_P (X) && GET_CODE (X) != CONST_DOUBLE) +#endif + +#ifndef MAX_FIXED_MODE_SIZE +#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode) +#endif + +/* Nonzero if structures and unions should be returned in memory. + + This should only be defined if compatibility with another compiler or + with an ABI is needed, because it results in slower code. 
*/ + +#ifndef DEFAULT_PCC_STRUCT_RETURN +#define DEFAULT_PCC_STRUCT_RETURN 1 +#endif + +#ifndef PCC_BITFIELD_TYPE_MATTERS +#define PCC_BITFIELD_TYPE_MATTERS false +#endif + +#ifndef INSN_SETS_ARE_DELAYED +#define INSN_SETS_ARE_DELAYED(INSN) false +#endif + +#ifndef INSN_REFERENCES_ARE_DELAYED +#define INSN_REFERENCES_ARE_DELAYED(INSN) false +#endif + +#ifndef NO_FUNCTION_CSE +#define NO_FUNCTION_CSE false +#endif + +#ifndef HARD_REGNO_RENAME_OK +#define HARD_REGNO_RENAME_OK(FROM, TO) true +#endif + +#ifndef EPILOGUE_USES +#define EPILOGUE_USES(REG) false +#endif + +#ifndef ARGS_GROW_DOWNWARD +#define ARGS_GROW_DOWNWARD 0 +#endif + +#ifndef STACK_GROWS_DOWNWARD +#define STACK_GROWS_DOWNWARD 0 +#endif + +#ifndef STACK_PUSH_CODE +#if STACK_GROWS_DOWNWARD +#define STACK_PUSH_CODE PRE_DEC +#else +#define STACK_PUSH_CODE PRE_INC +#endif +#endif + +/* Default value for flag_pie when flag_pie is initialized to -1: + --enable-default-pie: Default flag_pie to -fPIE. + --disable-default-pie: Default flag_pie to 0. + */ +#ifdef ENABLE_DEFAULT_PIE +# ifndef DEFAULT_FLAG_PIE +# define DEFAULT_FLAG_PIE 2 +# endif +#else +# define DEFAULT_FLAG_PIE 0 +#endif + +#ifndef SWITCHABLE_TARGET +#define SWITCHABLE_TARGET 0 +#endif + +/* If the target supports integers that are wider than two + HOST_WIDE_INTs on the host compiler, then the target should define + TARGET_SUPPORTS_WIDE_INT and make the appropriate fixups. + Otherwise the compiler really is not robust. 
*/ +#ifndef TARGET_SUPPORTS_WIDE_INT +#define TARGET_SUPPORTS_WIDE_INT 0 +#endif + +#ifndef SHORT_IMMEDIATES_SIGN_EXTEND +#define SHORT_IMMEDIATES_SIGN_EXTEND 0 +#endif + +#ifndef WORD_REGISTER_OPERATIONS +#define WORD_REGISTER_OPERATIONS 0 +#endif + +#ifndef LOAD_EXTEND_OP +#define LOAD_EXTEND_OP(M) UNKNOWN +#endif + +#ifndef INITIAL_FRAME_ADDRESS_RTX +#define INITIAL_FRAME_ADDRESS_RTX NULL +#endif + +#ifndef SETUP_FRAME_ADDRESSES +#define SETUP_FRAME_ADDRESSES() do { } while (0) +#endif + +#ifndef DYNAMIC_CHAIN_ADDRESS +#define DYNAMIC_CHAIN_ADDRESS(x) (x) +#endif + +#ifndef FRAME_ADDR_RTX +#define FRAME_ADDR_RTX(x) (x) +#endif + +#ifndef REVERSE_CONDITION +#define REVERSE_CONDITION(code, mode) reverse_condition (code) +#endif + +#ifndef TARGET_PECOFF +#define TARGET_PECOFF 0 +#endif + +#ifndef TARGET_COFF +#define TARGET_COFF 0 +#endif + +#ifndef EH_RETURN_HANDLER_RTX +#define EH_RETURN_HANDLER_RTX NULL +#endif + +#ifdef GCC_INSN_FLAGS_H +/* Dependent default target macro definitions + + This section of defaults.h defines target macros that depend on generated + headers. This is a bit awkward: We want to put all default definitions + for target macros in defaults.h, but some of the defaults depend on the + HAVE_* flags defines of insn-flags.h. But insn-flags.h is not always + included by files that do include defaults.h. + + Fortunately, the default macro definitions that depend on the HAVE_* + macros are also the ones that will only be used inside GCC itself, i.e. + not in the gen* programs or in target objects like libgcc. + + Obviously, it would be best to keep this section of defaults.h as small + as possible, by converting the macros defined below to target hooks or + functions. +*/ + +/* The default branch cost is 1. */ +#ifndef BRANCH_COST +#define BRANCH_COST(speed_p, predictable_p) 1 +#endif + +/* If a memory-to-memory move would take MOVE_RATIO or more simple + move-instruction sequences, we will do a cpymem or libcall instead. 
*/ + +#ifndef MOVE_RATIO +#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti) +#define MOVE_RATIO(speed) 2 +#else +/* If we are optimizing for space (-Os), cut down the default move ratio. */ +#define MOVE_RATIO(speed) ((speed) ? 15 : 3) +#endif +#endif + +/* If a clear memory operation would take CLEAR_RATIO or more simple + move-instruction sequences, we will do a setmem or libcall instead. */ + +#ifndef CLEAR_RATIO +#if defined (HAVE_setmemqi) || defined (HAVE_setmemhi) || defined (HAVE_setmemsi) || defined (HAVE_setmemdi) || defined (HAVE_setmemti) +#define CLEAR_RATIO(speed) 2 +#else +/* If we are optimizing for space, cut down the default clear ratio. */ +#define CLEAR_RATIO(speed) ((speed) ? 15 :3) +#endif +#endif + +/* If a memory set (to value other than zero) operation would take + SET_RATIO or more simple move-instruction sequences, we will do a setmem + or libcall instead. */ +#ifndef SET_RATIO +#define SET_RATIO(speed) MOVE_RATIO (speed) +#endif + +/* Supply a default definition of STACK_SAVEAREA_MODE for emit_stack_save. + Normally move_insn, so Pmode stack pointer. */ + +#ifndef STACK_SAVEAREA_MODE +#define STACK_SAVEAREA_MODE(LEVEL) Pmode +#endif + +/* Supply a default definition of STACK_SIZE_MODE for + allocate_dynamic_stack_space. Normally PLUS/MINUS, so word_mode. */ + +#ifndef STACK_SIZE_MODE +#define STACK_SIZE_MODE word_mode +#endif + +/* Default value for flag_stack_protect when flag_stack_protect is initialized to -1: + --enable-default-ssp: Default flag_stack_protect to -fstack-protector-strong. + --disable-default-ssp: Default flag_stack_protect to 0. + */ +#ifdef ENABLE_DEFAULT_SSP +# ifndef DEFAULT_FLAG_SSP +# define DEFAULT_FLAG_SSP 3 +# endif +#else +# define DEFAULT_FLAG_SSP 0 +#endif + +/* Provide default values for the macros controlling stack checking. */ + +/* The default is neither full builtin stack checking... 
*/ +#ifndef STACK_CHECK_BUILTIN +#define STACK_CHECK_BUILTIN 0 +#endif + +/* ...nor static builtin stack checking. */ +#ifndef STACK_CHECK_STATIC_BUILTIN +#define STACK_CHECK_STATIC_BUILTIN 0 +#endif + +/* The default interval is one page (4096 bytes). */ +#ifndef STACK_CHECK_PROBE_INTERVAL_EXP +#define STACK_CHECK_PROBE_INTERVAL_EXP 12 +#endif + +/* The default is not to move the stack pointer. */ +#ifndef STACK_CHECK_MOVING_SP +#define STACK_CHECK_MOVING_SP 0 +#endif + +/* This is a kludge to try to capture the discrepancy between the old + mechanism (generic stack checking) and the new mechanism (static + builtin stack checking). STACK_CHECK_PROTECT needs to be bumped + for the latter because part of the protection area is effectively + included in STACK_CHECK_MAX_FRAME_SIZE for the former. */ +#ifdef STACK_CHECK_PROTECT +#define STACK_OLD_CHECK_PROTECT STACK_CHECK_PROTECT +#else +#define STACK_OLD_CHECK_PROTECT \ + (!global_options.x_flag_exceptions \ + ? 75 * UNITS_PER_WORD \ + : targetm_common.except_unwind_info (&global_options) == UI_SJLJ \ + ? 4 * 1024 \ + : 8 * 1024) +#endif + +/* Minimum amount of stack required to recover from an anticipated stack + overflow detection. The default value conveys an estimate of the amount + of stack required to propagate an exception. */ +#ifndef STACK_CHECK_PROTECT +#define STACK_CHECK_PROTECT \ + (!global_options.x_flag_exceptions \ + ? 4 * 1024 \ + : targetm_common.except_unwind_info (&global_options) == UI_SJLJ \ + ? 8 * 1024 \ + : 12 * 1024) +#endif + +/* Make the maximum frame size be the largest we can and still only need + one probe per function. */ +#ifndef STACK_CHECK_MAX_FRAME_SIZE +#define STACK_CHECK_MAX_FRAME_SIZE \ + ((1 << STACK_CHECK_PROBE_INTERVAL_EXP) - UNITS_PER_WORD) +#endif + +/* This is arbitrary, but should be large enough everywhere. 
*/ +#ifndef STACK_CHECK_FIXED_FRAME_SIZE +#define STACK_CHECK_FIXED_FRAME_SIZE (4 * UNITS_PER_WORD) +#endif + +/* Provide a reasonable default for the maximum size of an object to + allocate in the fixed frame. We may need to be able to make this + controllable by the user at some point. */ +#ifndef STACK_CHECK_MAX_VAR_SIZE +#define STACK_CHECK_MAX_VAR_SIZE (STACK_CHECK_MAX_FRAME_SIZE / 100) +#endif + +/* By default, the C++ compiler will use function addresses in the + vtable entries. Setting this nonzero tells the compiler to use + function descriptors instead. The value of this macro says how + many words wide the descriptor is (normally 2). It is assumed + that the address of a function descriptor may be treated as a + pointer to a function. */ +#ifndef TARGET_VTABLE_USES_DESCRIPTORS +#define TARGET_VTABLE_USES_DESCRIPTORS 0 +#endif + +#endif /* GCC_INSN_FLAGS_H */ + +#ifndef DWARF_GNAT_ENCODINGS_DEFAULT +#define DWARF_GNAT_ENCODINGS_DEFAULT DWARF_GNAT_ENCODINGS_GDB +#endif + +#ifndef USED_FOR_TARGET +/* Done this way to keep gengtype happy. */ +#if BITS_PER_UNIT == 8 +#define TARGET_UNIT uint8_t +#elif BITS_PER_UNIT == 16 +#define TARGET_UNIT uint16_t +#elif BITS_PER_UNIT == 32 +#define TARGET_UNIT uint32_t +#else +#error Unknown BITS_PER_UNIT +#endif +typedef TARGET_UNIT target_unit; +#endif + +#endif /* ! GCC_DEFAULTS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/df.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/df.h new file mode 100644 index 0000000..bd32920 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/df.h @@ -0,0 +1,1252 @@ +/* Form lists of pseudo register references for autoinc optimization + for GNU compiler. This is part of flow optimization. + Copyright (C) 1999-2022 Free Software Foundation, Inc. + Originally contributed by Michael P. 
Hayes + (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com) + Major rewrite contributed by Danny Berlin (dberlin@dberlin.org) + and Kenneth Zadeck (zadeck@naturalbridge.com). + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DF_H +#define GCC_DF_H + +#include "regset.h" +#include "alloc-pool.h" +#include "timevar.h" + +struct dataflow; +class df_d; +struct df_problem; +struct df_link; +struct df_insn_info; +union df_ref_d; + +/* Data flow problems. All problems must have a unique id here. */ + +/* Scanning is not really a dataflow problem, but it is useful to have + the basic block functions in the vector so that things get done in + a uniform manner. The last four problems can be added or deleted + at any time are always defined (though LIVE is always there at -O2 + or higher); the others are always there. */ +enum df_problem_id + { + DF_SCAN, + DF_LR, /* Live Registers backward. */ + DF_LIVE, /* Live Registers & Uninitialized Registers */ + DF_RD, /* Reaching Defs. */ + DF_CHAIN, /* Def-Use and/or Use-Def Chains. */ + DF_WORD_LR, /* Subreg tracking lr. */ + DF_NOTE, /* REG_DEAD and REG_UNUSED notes. */ + DF_MD, /* Multiple Definitions. */ + DF_MIR, /* Must-initialized Registers. */ + + DF_LAST_PROBLEM_PLUS1 + }; + +/* Dataflow direction. */ +enum df_flow_dir + { + DF_NONE, + DF_FORWARD, + DF_BACKWARD + }; + +/* Descriminator for the various df_ref types. 
*/ +enum df_ref_class {DF_REF_BASE, DF_REF_ARTIFICIAL, DF_REF_REGULAR}; + +/* The first of these us a set of a registers. The remaining three + are all uses of a register (the mem_load and mem_store relate to + how the register as an addressing operand). */ +enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_USE, + DF_REF_REG_MEM_LOAD, DF_REF_REG_MEM_STORE}; + +enum df_ref_flags + { + /* This flag is set if this ref occurs inside of a conditional + execution instruction. */ + DF_REF_CONDITIONAL = 1 << 0, + + /* If this flag is set for an artificial use or def, that ref + logically happens at the top of the block. If it is not set + for an artificial use or def, that ref logically happens at the + bottom of the block. This is never set for regular refs. */ + DF_REF_AT_TOP = 1 << 1, + + /* This flag is set if the use is inside a REG_EQUAL or REG_EQUIV + note. */ + DF_REF_IN_NOTE = 1 << 2, + + /* This bit is true if this ref can make regs_ever_live true for + this regno. */ + DF_HARD_REG_LIVE = 1 << 3, + + + /* This flag is set if this ref is a partial use or def of the + associated register. */ + DF_REF_PARTIAL = 1 << 4, + + /* Read-modify-write refs generate both a use and a def and + these are marked with this flag to show that they are not + independent. */ + DF_REF_READ_WRITE = 1 << 5, + + /* This flag is set if this ref, generally a def, may clobber the + referenced register. This is generally only set for hard + registers that cross a call site. With better information + about calls, some of these could be changed in the future to + DF_REF_MUST_CLOBBER. */ + DF_REF_MAY_CLOBBER = 1 << 6, + + /* This flag is set if this ref, generally a def, is a real + clobber. This is not currently set for registers live across a + call because that clobbering may or may not happen. + + Most of the uses of this are with sets that have a + GET_CODE(..)==CLOBBER. Note that this is set even if the + clobber is to a subreg. 
So in order to tell if the clobber + wipes out the entire register, it is necessary to also check + the DF_REF_PARTIAL flag. */ + DF_REF_MUST_CLOBBER = 1 << 7, + + + /* If the ref has one of the following two flags set, then the + struct df_ref can be cast to struct df_ref_extract to access + the width and offset fields. */ + + /* This flag is set if the ref contains a SIGN_EXTRACT. */ + DF_REF_SIGN_EXTRACT = 1 << 8, + + /* This flag is set if the ref contains a ZERO_EXTRACT. */ + DF_REF_ZERO_EXTRACT = 1 << 9, + + /* This flag is set if the ref contains a STRICT_LOW_PART. */ + DF_REF_STRICT_LOW_PART = 1 << 10, + + /* This flag is set if the ref contains a SUBREG. */ + DF_REF_SUBREG = 1 << 11, + + + /* This bit is true if this ref is part of a multiword hardreg. */ + DF_REF_MW_HARDREG = 1 << 12, + + /* This flag is set if this ref is a usage of the stack pointer by + a function call. */ + DF_REF_CALL_STACK_USAGE = 1 << 13, + + /* This flag is used for verification of existing refs. */ + DF_REF_REG_MARKER = 1 << 14, + + /* This flag is set if this ref is inside a pre/post modify. */ + DF_REF_PRE_POST_MODIFY = 1 << 15 + + }; + +/* The possible ordering of refs within the df_ref_info. */ +enum df_ref_order + { + /* There is not table. */ + DF_REF_ORDER_NO_TABLE, + + /* There is a table of refs but it is not (or no longer) organized + by one of the following methods. */ + DF_REF_ORDER_UNORDERED, + DF_REF_ORDER_UNORDERED_WITH_NOTES, + + /* Organize the table by reg order, all of the refs with regno 0 + followed by all of the refs with regno 1 ... . Within all of + the regs for a particular regno, the refs are unordered. */ + DF_REF_ORDER_BY_REG, + + /* For uses, the refs within eq notes may be added for + DF_REF_ORDER_BY_REG. */ + DF_REF_ORDER_BY_REG_WITH_NOTES, + + /* Organize the refs in insn order. The insns are ordered within a + block, and the blocks are ordered by FOR_ALL_BB_FN. 
*/ + DF_REF_ORDER_BY_INSN, + + /* For uses, the refs within eq notes may be added for + DF_REF_ORDER_BY_INSN. */ + DF_REF_ORDER_BY_INSN_WITH_NOTES + }; + +/* Function prototypes added to df_problem instance. */ + +/* Allocate the problem specific data. */ +typedef void (*df_alloc_function) (bitmap); + +/* This function is called if the problem has global data that needs + to be cleared when ever the set of blocks changes. The bitmap + contains the set of blocks that may require special attention. + This call is only made if some of the blocks are going to change. + If everything is to be deleted, the wholesale deletion mechanisms + apply. */ +typedef void (*df_reset_function) (bitmap); + +/* Free the basic block info. Called from the block reordering code + to get rid of the blocks that have been squished down. */ +typedef void (*df_free_bb_function) (basic_block, void *); + +/* Local compute function. */ +typedef void (*df_local_compute_function) (bitmap); + +/* Init the solution specific data. */ +typedef void (*df_init_function) (bitmap); + +/* Iterative dataflow function. */ +typedef void (*df_dataflow_function) (struct dataflow *, bitmap, int *, int); + +/* Confluence operator for blocks with 0 out (or in) edges. */ +typedef void (*df_confluence_function_0) (basic_block); + +/* Confluence operator for blocks with 1 or more out (or in) edges. + Return true if BB input data has changed. */ +typedef bool (*df_confluence_function_n) (edge); + +/* Transfer function for blocks. + Return true if BB output data has changed. */ +typedef bool (*df_transfer_function) (int); + +/* Function to massage the information after the problem solving. */ +typedef void (*df_finalizer_function) (bitmap); + +/* Function to free all of the problem specific datastructures. 
*/ +typedef void (*df_free_function) (void); + +/* Function to remove this problem from the stack of dataflow problems + without effecting the other problems in the stack except for those + that depend on this problem. */ +typedef void (*df_remove_problem_function) (void); + +/* Function to dump basic block independent results to FILE. */ +typedef void (*df_dump_problem_function) (FILE *); + +/* Function to dump top or bottom of basic block results to FILE. */ +typedef void (*df_dump_bb_problem_function) (basic_block, FILE *); + +/* Function to dump before or after an insn to FILE. */ +typedef void (*df_dump_insn_problem_function) (const rtx_insn *, FILE *); + +/* Function to dump top or bottom of basic block results to FILE. */ +typedef void (*df_verify_solution_start) (void); + +/* Function to dump top or bottom of basic block results to FILE. */ +typedef void (*df_verify_solution_end) (void); + +/* The static description of a dataflow problem to solve. See above + typedefs for doc for the function fields. */ + +struct df_problem { + /* The unique id of the problem. This is used it index into + df->defined_problems to make accessing the problem data easy. */ + enum df_problem_id id; + enum df_flow_dir dir; /* Dataflow direction. 
*/ + df_alloc_function alloc_fun; + df_reset_function reset_fun; + df_free_bb_function free_bb_fun; + df_local_compute_function local_compute_fun; + df_init_function init_fun; + df_dataflow_function dataflow_fun; + df_confluence_function_0 con_fun_0; + df_confluence_function_n con_fun_n; + df_transfer_function trans_fun; + df_finalizer_function finalize_fun; + df_free_function free_fun; + df_remove_problem_function remove_problem_fun; + df_dump_problem_function dump_start_fun; + df_dump_bb_problem_function dump_top_fun; + df_dump_bb_problem_function dump_bottom_fun; + df_dump_insn_problem_function dump_insn_top_fun; + df_dump_insn_problem_function dump_insn_bottom_fun; + df_verify_solution_start verify_start_fun; + df_verify_solution_end verify_end_fun; + const struct df_problem *dependent_problem; + unsigned int block_info_elt_size; + + /* The timevar id associated with this pass. */ + timevar_id_t tv_id; + + /* True if the df_set_blocks should null out the basic block info if + this block drops out of df->blocks_to_analyze. */ + bool free_blocks_on_set_blocks; +}; + + +/* The specific instance of the problem to solve. */ +struct dataflow +{ + const struct df_problem *problem; /* The problem to be solved. */ + + /* Array indexed by bb->index, that contains basic block problem and + solution specific information. */ + void *block_info; + unsigned int block_info_size; + + /* The pool to allocate the block_info from. */ + object_allocator *block_pool; + + /* The lr and live problems have their transfer functions recomputed + only if necessary. This is possible for them because, the + problems are kept active for the entire backend and their + transfer functions are indexed by the REGNO. These are not + defined for any other problem. */ + bitmap out_of_date_transfer_functions; + + /* Other problem specific data that is not on a per basic block + basis. The structure is generally defined privately for the + problem. 
The exception being the scanning problem where it is + fully public. */ + void *problem_data; + + /* Local flags for some of the problems. */ + unsigned int local_flags; + + /* True if this problem of this instance has been initialized. This + is used by the dumpers to keep garbage out of the dumps if, for + debugging a dump is produced before the first call to + df_analyze after a new problem is added. */ + bool computed; + + /* True if the something has changed which invalidates the dataflow + solutions. Note that this bit is always true for all problems except + lr and live. */ + bool solutions_dirty; + + /* If true, this pass is deleted by df_finish_pass. This is never + true for DF_SCAN and DF_LR. It is true for DF_LIVE if optimize > + 1. It is always true for the other problems. */ + bool optional_p; +}; + + +/* The set of multiword hardregs used as operands to this + instruction. These are factored into individual uses and defs but + the aggregate is still needed to service the REG_DEAD and + REG_UNUSED notes. */ +struct df_mw_hardreg +{ + df_mw_hardreg *next; /* Next entry for this instruction. */ + rtx mw_reg; /* The multiword hardreg. */ + /* These two bitfields are intentionally oversized, in the hope that + accesses to 16-bit fields will usually be quicker. */ + ENUM_BITFIELD(df_ref_type) type : 16; + /* Used to see if the ref is read or write. */ + int flags : 16; /* Various df_ref_flags. */ + unsigned int start_regno; /* First word of the multi word subreg. */ + unsigned int end_regno; /* Last word of the multi word subreg. */ + unsigned int mw_order; /* Same as df_ref.ref_order. */ +}; + + +/* Define a register reference structure. One of these is allocated + for every register reference (use or def). Note some register + references (e.g., post_inc, subreg) generate both a def and a use. */ +struct df_base_ref +{ + /* These three bitfields are intentionally oversized, in the hope that + accesses to 8 and 16-bit fields will usually be quicker. 
*/ + ENUM_BITFIELD(df_ref_class) cl : 8; + + ENUM_BITFIELD(df_ref_type) type : 8; + /* Type of ref. */ + int flags : 16; /* Various df_ref_flags. */ + unsigned int regno; /* The register number referenced. */ + rtx reg; /* The register referenced. */ + union df_ref_d *next_loc; /* Next ref for same insn or bb. */ + struct df_link *chain; /* Head of def-use, use-def. */ + /* Pointer to the insn info of the containing instruction. FIXME! + Currently this is NULL for artificial refs but this will be used + when FUDs are added. */ + struct df_insn_info *insn_info; + /* For each regno, there are three chains of refs, one for the uses, + the eq_uses and the defs. These chains go through the refs + themselves rather than using an external structure. */ + union df_ref_d *next_reg; /* Next ref with same regno and type. */ + union df_ref_d *prev_reg; /* Prev ref with same regno and type. */ + /* Location in the ref table. This is only valid after a call to + df_maybe_reorganize_[use,def]_refs which is an expensive operation. */ + int id; + /* The index at which the operand was scanned in the insn. This is + used to totally order the refs in an insn. */ + unsigned int ref_order; +}; + + +/* The three types of df_refs. Note that the df_ref_extract is an + extension of the df_regular_ref, not the df_base_ref. */ +struct df_artificial_ref +{ + struct df_base_ref base; + + /* Artificial refs do not have an insn, so to get the basic block, + it must be explicitly here. */ + basic_block bb; +}; + + +struct df_regular_ref +{ + struct df_base_ref base; + /* The loc is the address in the insn of the reg. This is not + defined for special registers, such as clobbers and stack + pointers that are also associated with call insns and so those + just use the base. */ + rtx *loc; +}; + +/* Union of the different kinds of defs/uses placeholders. 
*/ +union df_ref_d +{ + struct df_base_ref base; + struct df_regular_ref regular_ref; + struct df_artificial_ref artificial_ref; +}; +typedef union df_ref_d *df_ref; + + +/* One of these structures is allocated for every insn. */ +struct df_insn_info +{ + rtx_insn *insn; /* The insn this info comes from. */ + df_ref defs; /* Head of insn-def chain. */ + df_ref uses; /* Head of insn-use chain. */ + /* Head of insn-use chain for uses in REG_EQUAL/EQUIV notes. */ + df_ref eq_uses; + struct df_mw_hardreg *mw_hardregs; + /* The logical uid of the insn in the basic block. This is valid + after any call to df_analyze but may rot after insns are added, + deleted or moved. */ + int luid; +}; + +/* These links are used for ref-ref chains. Currently only DEF-USE and + USE-DEF chains can be built by DF. */ +struct df_link +{ + df_ref ref; + struct df_link *next; +}; + + +enum df_chain_flags +{ + /* Flags that control the building of chains. */ + DF_DU_CHAIN = 1, /* Build DU chains. */ + DF_UD_CHAIN = 2 /* Build UD chains. */ +}; + +enum df_scan_flags +{ + /* Flags for the SCAN problem. */ + DF_SCAN_EMPTY_ENTRY_EXIT = 1 /* Don't define any registers in the entry + block; don't use any in the exit block. */ +}; + +enum df_changeable_flags +{ + /* Scanning flags. */ + /* Flag to control the running of dce as a side effect of building LR. */ + DF_LR_RUN_DCE = 1 << 0, /* Run DCE. */ + DF_NO_HARD_REGS = 1 << 1, /* Skip hard registers in RD and CHAIN Building. */ + + DF_EQ_NOTES = 1 << 2, /* Build chains with uses present in EQUIV/EQUAL notes. */ + DF_NO_REGS_EVER_LIVE = 1 << 3, /* Do not compute the regs_ever_live. */ + + /* Cause df_insn_rescan df_notes_rescan and df_insn_delete, to + return immediately. This is used by passes that know how to update + the scanning them selves. */ + DF_NO_INSN_RESCAN = 1 << 4, + + /* Cause df_insn_rescan df_notes_rescan and df_insn_delete, to + return after marking the insn for later processing. This allows all + rescans to be batched. 
*/ + DF_DEFER_INSN_RESCAN = 1 << 5, + + /* Compute the reaching defs problem as "live and reaching defs" (LR&RD). + A DEF is reaching and live at insn I if DEF reaches I and REGNO(DEF) + is in LR_IN of the basic block containing I. */ + DF_RD_PRUNE_DEAD_DEFS = 1 << 6, + + DF_VERIFY_SCHEDULED = 1 << 7 +}; + +/* Two of these structures are inline in df, one for the uses and one + for the defs. This structure is only contains the refs within the + boundary of the df_set_blocks if that has been defined. */ +struct df_ref_info +{ + df_ref *refs; /* Ref table, indexed by id. */ + unsigned int *begin; /* First ref_index for this pseudo. */ + unsigned int *count; /* Count of refs for this pseudo. */ + unsigned int refs_size; /* Size of currently allocated refs table. */ + + /* Table_size is the number of elements in the refs table. This + will also be the width of the bitvectors in the rd and ru + problems. Total_size is the number of refs. These will be the + same if the focus has not been reduced by df_set_blocks. If the + focus has been reduced, table_size will be smaller since it only + contains the refs in the set blocks. */ + unsigned int table_size; + unsigned int total_size; + + enum df_ref_order ref_order; +}; + +/* Three of these structures are allocated for every pseudo reg. One + for the uses, one for the eq_uses and one for the defs. */ +struct df_reg_info +{ + /* Head of chain for refs of that type and regno. */ + df_ref reg_chain; + /* Number of refs in the chain. */ + unsigned int n_refs; +}; + + +/*---------------------------------------------------------------------------- + Problem data for the scanning dataflow problem. Unlike the other + dataflow problems, the problem data for scanning is fully exposed and + used by owners of the problem. +----------------------------------------------------------------------------*/ + +class df_d +{ +public: + + /* The set of problems to be solved is stored in two arrays. 
In + PROBLEMS_IN_ORDER, the problems are stored in the order that they + are solved. This is an internally dense array that may have + nulls at the end of it. In PROBLEMS_BY_INDEX, the problem is + stored by the value in df_problem.id. These are used to access + the problem local data without having to search the first + array. */ + + struct dataflow *problems_in_order[DF_LAST_PROBLEM_PLUS1]; + struct dataflow *problems_by_index[DF_LAST_PROBLEM_PLUS1]; + + /* If not NULL, this subset of blocks of the program to be + considered for analysis. At certain times, this will contain all + the blocks in the function so it cannot be used as an indicator + of if we are analyzing a subset. See analyze_subset. */ + bitmap blocks_to_analyze; + + /* The following information is really the problem data for the + scanning instance but it is used too often by the other problems + to keep getting it from there. */ + struct df_ref_info def_info; /* Def info. */ + struct df_ref_info use_info; /* Use info. */ + + /* The following three arrays are allocated in parallel. They contain + the sets of refs of each type for each reg. */ + struct df_reg_info **def_regs; /* Def reg info. */ + struct df_reg_info **use_regs; /* Eq_use reg info. */ + struct df_reg_info **eq_use_regs; /* Eq_use info. */ + unsigned int regs_size; /* Size of currently allocated regs table. */ + unsigned int regs_inited; /* Number of regs with reg_infos allocated. */ + + + struct df_insn_info **insns; /* Insn table, indexed by insn UID. */ + unsigned int insns_size; /* Size of insn table. */ + + int num_problems_defined; + + bitmap_head hardware_regs_used; /* The set of hardware registers used. */ + /* The set of hard regs that are in the artificial uses at the end + of a regular basic block. */ + bitmap_head regular_block_artificial_uses; + /* The set of hard regs that are in the artificial uses at the end + of a basic block that has an EH pred. 
*/ + bitmap_head eh_block_artificial_uses; + /* The set of hardware registers live on entry to the function. */ + bitmap entry_block_defs; + bitmap exit_block_uses; /* The set of hardware registers used in exit block. */ + + /* Insns to delete, rescan or reprocess the notes at next + df_rescan_all or df_process_deferred_rescans. */ + bitmap_head insns_to_delete; + bitmap_head insns_to_rescan; + bitmap_head insns_to_notes_rescan; + int *postorder; /* The current set of basic blocks + in reverse postorder. */ + vec postorder_inverted; /* The current set of basic blocks + in reverse postorder of inverted CFG. */ + int n_blocks; /* The number of blocks in reverse postorder. */ + + /* An array [FIRST_PSEUDO_REGISTER], indexed by regno, of the number + of refs that qualify as being real hard regs uses. Artificial + uses and defs as well as refs in eq notes are ignored. If the + ref is a def, it cannot be a MAY_CLOBBER def. If the ref is a + use, it cannot be the emim_reg_set or be the frame or arg pointer + register. Uses in debug insns are ignored. + + IT IS NOT ACCEPTABLE TO MANUALLY CHANGE THIS ARRAY. This array + always reflects the actual number of refs in the insn stream that + satisfy the above criteria. */ + unsigned int *hard_regs_live_count; + + /* This counter provides a way to totally order refs without using + addresses. It is incremented whenever a ref is created. */ + unsigned int ref_order; + + /* Problem specific control information. This is a combination of + enum df_changeable_flags values. */ + int changeable_flags : 8; + + /* If this is true, then only a subset of the blocks of the program + is considered to compute the solutions of dataflow problems. */ + bool analyze_subset; + + /* True if someone added or deleted something from regs_ever_live so + that the entry and exit blocks need be reprocessed. 
*/ + bool redo_entry_and_exit; +}; + +#define DF_SCAN_BB_INFO(BB) (df_scan_get_bb_info ((BB)->index)) +#define DF_RD_BB_INFO(BB) (df_rd_get_bb_info ((BB)->index)) +#define DF_LR_BB_INFO(BB) (df_lr_get_bb_info ((BB)->index)) +#define DF_LIVE_BB_INFO(BB) (df_live_get_bb_info ((BB)->index)) +#define DF_WORD_LR_BB_INFO(BB) (df_word_lr_get_bb_info ((BB)->index)) +#define DF_MD_BB_INFO(BB) (df_md_get_bb_info ((BB)->index)) +#define DF_MIR_BB_INFO(BB) (df_mir_get_bb_info ((BB)->index)) + +/* Most transformations that wish to use live register analysis will + use these macros. This info is the and of the lr and live sets. */ +#define DF_LIVE_IN(BB) (&DF_LIVE_BB_INFO (BB)->in) +#define DF_LIVE_OUT(BB) (&DF_LIVE_BB_INFO (BB)->out) + +#define DF_MIR_IN(BB) (&DF_MIR_BB_INFO (BB)->in) +#define DF_MIR_OUT(BB) (&DF_MIR_BB_INFO (BB)->out) + +/* These macros are used by passes that are not tolerant of + uninitialized variables. This intolerance should eventually + be fixed. */ +#define DF_LR_IN(BB) (&DF_LR_BB_INFO (BB)->in) +#define DF_LR_OUT(BB) (&DF_LR_BB_INFO (BB)->out) + +/* These macros are used by passes that are not tolerant of + uninitialized variables. This intolerance should eventually + be fixed. */ +#define DF_WORD_LR_IN(BB) (&DF_WORD_LR_BB_INFO (BB)->in) +#define DF_WORD_LR_OUT(BB) (&DF_WORD_LR_BB_INFO (BB)->out) + +/* Macros to access the elements within the ref structure. */ + + +#define DF_REF_REAL_REG(REF) (GET_CODE ((REF)->base.reg) == SUBREG \ + ? SUBREG_REG ((REF)->base.reg) : ((REF)->base.reg)) +#define DF_REF_REGNO(REF) ((REF)->base.regno) +#define DF_REF_REAL_LOC(REF) (GET_CODE (*((REF)->regular_ref.loc)) == SUBREG \ + ? &SUBREG_REG (*((REF)->regular_ref.loc)) : ((REF)->regular_ref.loc)) +#define DF_REF_REG(REF) ((REF)->base.reg) +#define DF_REF_LOC(REF) (DF_REF_CLASS (REF) == DF_REF_REGULAR ? \ + (REF)->regular_ref.loc : NULL) +#define DF_REF_BB(REF) (DF_REF_IS_ARTIFICIAL (REF) \ + ? 
(REF)->artificial_ref.bb \ + : BLOCK_FOR_INSN (DF_REF_INSN (REF))) +#define DF_REF_BBNO(REF) (DF_REF_BB (REF)->index) +#define DF_REF_INSN_INFO(REF) ((REF)->base.insn_info) +#define DF_REF_INSN(REF) ((REF)->base.insn_info->insn) +#define DF_REF_INSN_UID(REF) (INSN_UID (DF_REF_INSN(REF))) +#define DF_REF_CLASS(REF) ((REF)->base.cl) +#define DF_REF_TYPE(REF) ((REF)->base.type) +#define DF_REF_CHAIN(REF) ((REF)->base.chain) +#define DF_REF_ID(REF) ((REF)->base.id) +#define DF_REF_FLAGS(REF) ((REF)->base.flags) +#define DF_REF_FLAGS_IS_SET(REF, v) ((DF_REF_FLAGS (REF) & (v)) != 0) +#define DF_REF_FLAGS_SET(REF, v) (DF_REF_FLAGS (REF) |= (v)) +#define DF_REF_FLAGS_CLEAR(REF, v) (DF_REF_FLAGS (REF) &= ~(v)) +#define DF_REF_ORDER(REF) ((REF)->base.ref_order) +/* If DF_REF_IS_ARTIFICIAL () is true, this is not a real + definition/use, but an artificial one created to model always live + registers, eh uses, etc. */ +#define DF_REF_IS_ARTIFICIAL(REF) (DF_REF_CLASS (REF) == DF_REF_ARTIFICIAL) +#define DF_REF_REG_MARK(REF) (DF_REF_FLAGS_SET ((REF),DF_REF_REG_MARKER)) +#define DF_REF_REG_UNMARK(REF) (DF_REF_FLAGS_CLEAR ((REF),DF_REF_REG_MARKER)) +#define DF_REF_IS_REG_MARKED(REF) (DF_REF_FLAGS_IS_SET ((REF),DF_REF_REG_MARKER)) +#define DF_REF_NEXT_LOC(REF) ((REF)->base.next_loc) +#define DF_REF_NEXT_REG(REF) ((REF)->base.next_reg) +#define DF_REF_PREV_REG(REF) ((REF)->base.prev_reg) +/* The following two macros may only be applied if one of + DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT is true. */ +#define DF_REF_EXTRACT_WIDTH(REF) ((REF)->extract_ref.width) +#define DF_REF_EXTRACT_OFFSET(REF) ((REF)->extract_ref.offset) +#define DF_REF_EXTRACT_MODE(REF) ((REF)->extract_ref.mode) + +/* Macros to determine the reference type. 
*/ +#define DF_REF_REG_DEF_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_DEF) +#define DF_REF_REG_USE_P(REF) (!DF_REF_REG_DEF_P (REF)) +#define DF_REF_REG_MEM_STORE_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_STORE) +#define DF_REF_REG_MEM_LOAD_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_LOAD) +#define DF_REF_REG_MEM_P(REF) (DF_REF_REG_MEM_STORE_P (REF) \ + || DF_REF_REG_MEM_LOAD_P (REF)) + +#define DF_MWS_REG_DEF_P(MREF) (DF_MWS_TYPE (MREF) == DF_REF_REG_DEF) +#define DF_MWS_REG_USE_P(MREF) (!DF_MWS_REG_DEF_P (MREF)) +#define DF_MWS_NEXT(MREF) ((MREF)->next) +#define DF_MWS_TYPE(MREF) ((MREF)->type) + +/* Macros to get the refs out of def_info or use_info refs table. If + the focus of the dataflow has been set to some subset of blocks + with df_set_blocks, these macros will only find the uses and defs + in that subset of blocks. + + These macros should be used with care. The def macros are only + usable after a call to df_maybe_reorganize_def_refs and the use + macros are only usable after a call to + df_maybe_reorganize_use_refs. HOWEVER, BUILDING AND USING THESE + ARRAYS ARE A CACHE LOCALITY KILLER. */ + +#define DF_DEFS_TABLE_SIZE() (df->def_info.table_size) +#define DF_DEFS_GET(ID) (df->def_info.refs[(ID)]) +#define DF_DEFS_SET(ID,VAL) (df->def_info.refs[(ID)]=(VAL)) +#define DF_DEFS_COUNT(ID) (df->def_info.count[(ID)]) +#define DF_DEFS_BEGIN(ID) (df->def_info.begin[(ID)]) +#define DF_USES_TABLE_SIZE() (df->use_info.table_size) +#define DF_USES_GET(ID) (df->use_info.refs[(ID)]) +#define DF_USES_SET(ID,VAL) (df->use_info.refs[(ID)]=(VAL)) +#define DF_USES_COUNT(ID) (df->use_info.count[(ID)]) +#define DF_USES_BEGIN(ID) (df->use_info.begin[(ID)]) + +/* Macros to access the register information from scan dataflow record. 
*/ + +#define DF_REG_SIZE(DF) (df->regs_inited) +#define DF_REG_DEF_GET(REG) (df->def_regs[(REG)]) +#define DF_REG_DEF_CHAIN(REG) (df->def_regs[(REG)]->reg_chain) +#define DF_REG_DEF_COUNT(REG) (df->def_regs[(REG)]->n_refs) +#define DF_REG_USE_GET(REG) (df->use_regs[(REG)]) +#define DF_REG_USE_CHAIN(REG) (df->use_regs[(REG)]->reg_chain) +#define DF_REG_USE_COUNT(REG) (df->use_regs[(REG)]->n_refs) +#define DF_REG_EQ_USE_GET(REG) (df->eq_use_regs[(REG)]) +#define DF_REG_EQ_USE_CHAIN(REG) (df->eq_use_regs[(REG)]->reg_chain) +#define DF_REG_EQ_USE_COUNT(REG) (df->eq_use_regs[(REG)]->n_refs) + +/* Macros to access the elements within the reg_info structure table. */ + +#define DF_REGNO_FIRST_DEF(REGNUM) \ +(DF_REG_DEF_GET(REGNUM) ? DF_REG_DEF_GET (REGNUM) : 0) +#define DF_REGNO_LAST_USE(REGNUM) \ +(DF_REG_USE_GET(REGNUM) ? DF_REG_USE_GET (REGNUM) : 0) + +/* Macros to access the elements within the insn_info structure table. */ + +#define DF_INSN_SIZE() ((df)->insns_size) +#define DF_INSN_INFO_GET(INSN) (df->insns[(INSN_UID (INSN))]) +#define DF_INSN_INFO_SET(INSN,VAL) (df->insns[(INSN_UID (INSN))]=(VAL)) +#define DF_INSN_INFO_LUID(II) ((II)->luid) +#define DF_INSN_INFO_DEFS(II) ((II)->defs) +#define DF_INSN_INFO_USES(II) ((II)->uses) +#define DF_INSN_INFO_EQ_USES(II) ((II)->eq_uses) +#define DF_INSN_INFO_MWS(II) ((II)->mw_hardregs) + +#define DF_INSN_LUID(INSN) (DF_INSN_INFO_LUID (DF_INSN_INFO_GET (INSN))) +#define DF_INSN_DEFS(INSN) (DF_INSN_INFO_DEFS (DF_INSN_INFO_GET (INSN))) +#define DF_INSN_USES(INSN) (DF_INSN_INFO_USES (DF_INSN_INFO_GET (INSN))) +#define DF_INSN_EQ_USES(INSN) (DF_INSN_INFO_EQ_USES (DF_INSN_INFO_GET (INSN))) + +#define DF_INSN_UID_GET(UID) (df->insns[(UID)]) +#define DF_INSN_UID_SET(UID,VAL) (df->insns[(UID)]=(VAL)) +#define DF_INSN_UID_SAFE_GET(UID) (((unsigned)(UID) < DF_INSN_SIZE ()) \ + ? 
DF_INSN_UID_GET (UID) \ + : NULL) +#define DF_INSN_UID_LUID(INSN) (DF_INSN_UID_GET (INSN)->luid) +#define DF_INSN_UID_DEFS(INSN) (DF_INSN_UID_GET (INSN)->defs) +#define DF_INSN_UID_USES(INSN) (DF_INSN_UID_GET (INSN)->uses) +#define DF_INSN_UID_EQ_USES(INSN) (DF_INSN_UID_GET (INSN)->eq_uses) +#define DF_INSN_UID_MWS(INSN) (DF_INSN_UID_GET (INSN)->mw_hardregs) + +#define FOR_EACH_INSN_INFO_DEF(ITER, INSN) \ + for (ITER = DF_INSN_INFO_DEFS (INSN); ITER; ITER = DF_REF_NEXT_LOC (ITER)) + +#define FOR_EACH_INSN_INFO_USE(ITER, INSN) \ + for (ITER = DF_INSN_INFO_USES (INSN); ITER; ITER = DF_REF_NEXT_LOC (ITER)) + +#define FOR_EACH_INSN_INFO_EQ_USE(ITER, INSN) \ + for (ITER = DF_INSN_INFO_EQ_USES (INSN); ITER; ITER = DF_REF_NEXT_LOC (ITER)) + +#define FOR_EACH_INSN_INFO_MW(ITER, INSN) \ + for (ITER = DF_INSN_INFO_MWS (INSN); ITER; ITER = DF_MWS_NEXT (ITER)) + +#define FOR_EACH_INSN_DEF(ITER, INSN) \ + FOR_EACH_INSN_INFO_DEF(ITER, DF_INSN_INFO_GET (INSN)) + +#define FOR_EACH_INSN_USE(ITER, INSN) \ + FOR_EACH_INSN_INFO_USE(ITER, DF_INSN_INFO_GET (INSN)) + +#define FOR_EACH_INSN_EQ_USE(ITER, INSN) \ + FOR_EACH_INSN_INFO_EQ_USE(ITER, DF_INSN_INFO_GET (INSN)) + +#define FOR_EACH_ARTIFICIAL_USE(ITER, BB_INDEX) \ + for (ITER = df_get_artificial_uses (BB_INDEX); ITER; \ + ITER = DF_REF_NEXT_LOC (ITER)) + +#define FOR_EACH_ARTIFICIAL_DEF(ITER, BB_INDEX) \ + for (ITER = df_get_artificial_defs (BB_INDEX); ITER; \ + ITER = DF_REF_NEXT_LOC (ITER)) + +/* An obstack for bitmap not related to specific dataflow problems. + This obstack should e.g. be used for bitmaps with a short life time + such as temporary bitmaps. This obstack is declared in df-core.cc. */ + +extern bitmap_obstack df_bitmap_obstack; + + +/* One of these structures is allocated for every basic block. */ +struct df_scan_bb_info +{ + /* The entry block has many artificial defs and these are at the + bottom of the block. + + Blocks that are targets of exception edges may have some + artificial defs. 
These are logically located at the top of the + block. + + Blocks that are the targets of non-local goto's have the hard + frame pointer defined at the top of the block. */ + df_ref artificial_defs; + + /* Blocks that are targets of exception edges may have some + artificial uses. These are logically at the top of the block. + + Most blocks have artificial uses at the bottom of the block. */ + df_ref artificial_uses; +}; + + +/* Reaching definitions. All bitmaps are indexed by the id field of + the ref except sparse_kill which is indexed by regno. For the + LR&RD problem, the kill set is not complete: It does not contain + DEFs killed because the set register has died in the LR set. */ +class df_rd_bb_info +{ +public: + /* Local sets to describe the basic blocks. */ + bitmap_head kill; + bitmap_head sparse_kill; + bitmap_head gen; /* The set of defs generated in this block. */ + + /* The results of the dataflow problem. */ + bitmap_head in; /* At the top of the block. */ + bitmap_head out; /* At the bottom of the block. */ +}; + + +/* Multiple reaching definitions. All bitmaps are referenced by the + register number. */ + +class df_md_bb_info +{ +public: + /* Local sets to describe the basic blocks. */ + bitmap_head gen; /* Partial/conditional definitions live at BB out. */ + bitmap_head kill; /* Other definitions that are live at BB out. */ + bitmap_head init; /* Definitions coming from dominance frontier edges. */ + + /* The results of the dataflow problem. */ + bitmap_head in; /* Just before the block itself. */ + bitmap_head out; /* At the bottom of the block. */ +}; + + +/* Live registers, a backwards dataflow problem. All bitmaps are + referenced by the register number. */ + +class df_lr_bb_info +{ +public: + /* Local sets to describe the basic blocks. */ + bitmap_head def; /* The set of registers set in this block + - except artificial defs at the top. */ + bitmap_head use; /* The set of registers used in this block. 
*/ + + /* The results of the dataflow problem. */ + bitmap_head in; /* Just before the block itself. */ + bitmap_head out; /* At the bottom of the block. */ +}; + + +/* Uninitialized registers. All bitmaps are referenced by the + register number. Anded results of the forwards and backward live + info. Note that the forwards live information is not available + separately. */ +class df_live_bb_info +{ +public: + /* Local sets to describe the basic blocks. */ + bitmap_head kill; /* The set of registers unset in this block. Calls, + for instance, unset registers. */ + bitmap_head gen; /* The set of registers set in this block. */ + + /* The results of the dataflow problem. */ + bitmap_head in; /* At the top of the block. */ + bitmap_head out; /* At the bottom of the block. */ +}; + + +/* Live registers, a backwards dataflow problem. These bitmaps are + indexed by 2 * regno for each pseudo and have two entries for each + pseudo. Only pseudos that have a size of 2 * UNITS_PER_WORD are + meaningfully tracked. */ + +class df_word_lr_bb_info +{ +public: + /* Local sets to describe the basic blocks. */ + bitmap_head def; /* The set of registers set in this block + - except artificial defs at the top. */ + bitmap_head use; /* The set of registers used in this block. */ + + /* The results of the dataflow problem. */ + bitmap_head in; /* Just before the block itself. */ + bitmap_head out; /* At the bottom of the block. */ +}; + +/* Must-initialized registers. All bitmaps are referenced by the + register number. */ +class df_mir_bb_info +{ +public: + /* Local sets to describe the basic blocks. */ + bitmap_head kill; /* The set of registers unset in this block. Calls, + for instance, unset registers. */ + bitmap_head gen; /* The set of registers set in this block, excluding the + ones killed later on in this block. */ + + /* The results of the dataflow problem. */ + bitmap_head in; /* At the top of the block. */ + bitmap_head out; /* At the bottom of the block. 
*/ + bool con_visited; /* Visited by con_fun_{0,n}. */ +}; + + +/* This is used for debugging and for the dumpers to find the latest + instance so that the df info can be added to the dumps. This + should not be used by regular code. */ +extern class df_d *df; +#define df_scan (df->problems_by_index[DF_SCAN]) +#define df_rd (df->problems_by_index[DF_RD]) +#define df_lr (df->problems_by_index[DF_LR]) +#define df_live (df->problems_by_index[DF_LIVE]) +#define df_chain (df->problems_by_index[DF_CHAIN]) +#define df_word_lr (df->problems_by_index[DF_WORD_LR]) +#define df_note (df->problems_by_index[DF_NOTE]) +#define df_md (df->problems_by_index[DF_MD]) +#define df_mir (df->problems_by_index[DF_MIR]) + +/* This symbol turns on checking that each modification of the cfg has + been identified to the appropriate df routines. It is not part of + verification per se because the check that the final solution has + not changed covers this. However, if the solution is not being + properly recomputed because the cfg is being modified, adding in + calls to df_check_cfg_clean can be used to find the source of that + kind of problem. */ +#if 0 +#define DF_DEBUG_CFG +#endif + + +/* Functions defined in df-core.cc. 
*/ + +extern void df_add_problem (const struct df_problem *); +extern int df_set_flags (int); +extern int df_clear_flags (int); +extern void df_set_blocks (bitmap); +extern void df_remove_problem (struct dataflow *); +extern void df_finish_pass (bool); +extern void df_analyze_problem (struct dataflow *, bitmap, int *, int); +extern void df_analyze (); +extern void df_analyze_loop (class loop *); +extern int df_get_n_blocks (enum df_flow_dir); +extern int *df_get_postorder (enum df_flow_dir); +extern void df_simple_dataflow (enum df_flow_dir, df_init_function, + df_confluence_function_0, df_confluence_function_n, + df_transfer_function, bitmap, int *, int); +extern void df_mark_solutions_dirty (void); +extern bool df_get_bb_dirty (basic_block); +extern void df_set_bb_dirty (basic_block); +extern void df_compact_blocks (void); +extern void df_bb_replace (int, basic_block); +extern void df_bb_delete (int); +extern void df_verify (void); +#ifdef DF_DEBUG_CFG +extern void df_check_cfg_clean (void); +#endif +extern df_ref df_bb_regno_first_def_find (basic_block, unsigned int); +extern df_ref df_bb_regno_last_def_find (basic_block, unsigned int); +extern df_ref df_find_def (rtx_insn *, rtx); +extern bool df_reg_defined (rtx_insn *, rtx); +extern df_ref df_find_use (rtx_insn *, rtx); +extern bool df_reg_used (rtx_insn *, rtx); +extern void df_worklist_dataflow (struct dataflow *,bitmap, int *, int); +extern void df_print_regset (FILE *file, const_bitmap r); +extern void df_print_word_regset (FILE *file, const_bitmap r); +extern void df_dump (FILE *); +extern void df_dump_region (FILE *); +extern void df_dump_start (FILE *); +extern void df_dump_top (basic_block, FILE *); +extern void df_dump_bottom (basic_block, FILE *); +extern void df_dump_insn_top (const rtx_insn *, FILE *); +extern void df_dump_insn_bottom (const rtx_insn *, FILE *); +extern void df_refs_chain_dump (df_ref, bool, FILE *); +extern void df_regs_chain_dump (df_ref, FILE *); +extern void df_insn_debug 
(rtx_insn *, bool, FILE *); +extern void df_insn_debug_regno (rtx_insn *, FILE *); +extern void df_regno_debug (unsigned int, FILE *); +extern void df_ref_debug (df_ref, FILE *); +extern void debug_df_insn (rtx_insn *); +extern void debug_df_regno (unsigned int); +extern void debug_df_reg (rtx); +extern void debug_df_defno (unsigned int); +extern void debug_df_useno (unsigned int); +extern void debug_df_ref (df_ref); +extern void debug_df_chain (struct df_link *); + +/* Functions defined in df-problems.cc. */ + +extern struct df_link *df_chain_create (df_ref, df_ref); +extern void df_chain_unlink (df_ref); +extern void df_chain_copy (df_ref, struct df_link *); +extern void df_grow_bb_info (struct dataflow *); +extern void df_chain_dump (struct df_link *, FILE *); +extern void df_print_bb_index (basic_block bb, FILE *file); +extern void df_rd_add_problem (void); +extern void df_rd_simulate_artificial_defs_at_top (basic_block, bitmap); +extern void df_rd_simulate_one_insn (basic_block, rtx_insn *, bitmap); +extern void df_lr_add_problem (void); +extern void df_lr_verify_transfer_functions (void); +extern void df_live_verify_transfer_functions (void); +extern void df_live_add_problem (void); +extern void df_live_set_all_dirty (void); +extern void df_chain_add_problem (unsigned int); +extern void df_word_lr_add_problem (void); +extern bool df_word_lr_mark_ref (df_ref, bool, bitmap); +extern bool df_word_lr_simulate_defs (rtx_insn *, bitmap); +extern void df_word_lr_simulate_uses (rtx_insn *, bitmap); +extern void df_word_lr_simulate_artificial_refs_at_top (basic_block, bitmap); +extern void df_word_lr_simulate_artificial_refs_at_end (basic_block, bitmap); +extern void df_note_add_problem (void); +extern void df_md_add_problem (void); +extern void df_md_simulate_artificial_defs_at_top (basic_block, bitmap); +extern void df_md_simulate_one_insn (basic_block, rtx_insn *, bitmap); +extern void df_mir_add_problem (void); +extern void df_mir_simulate_one_insn (basic_block, 
rtx_insn *, bitmap, bitmap); +extern void df_simulate_find_noclobber_defs (rtx_insn *, bitmap); +extern void df_simulate_find_defs (rtx_insn *, bitmap); +extern void df_simulate_defs (rtx_insn *, bitmap); +extern void df_simulate_uses (rtx_insn *, bitmap); +extern void df_simulate_initialize_backwards (basic_block, bitmap); +extern void df_simulate_one_insn_backwards (basic_block, rtx_insn *, bitmap); +extern void df_simulate_finalize_backwards (basic_block, bitmap); +extern void df_simulate_initialize_forwards (basic_block, bitmap); +extern void df_simulate_one_insn_forwards (basic_block, rtx_insn *, bitmap); +extern void simulate_backwards_to_point (basic_block, regset, rtx); +extern bool can_move_insns_across (rtx_insn *, rtx_insn *, + rtx_insn *, rtx_insn *, + basic_block, regset, + regset, rtx_insn **); +/* Functions defined in df-scan.cc. */ + +extern void df_scan_alloc (bitmap); +extern void df_scan_add_problem (void); +extern void df_grow_reg_info (void); +extern void df_grow_insn_info (void); +extern void df_scan_blocks (void); +extern void df_uses_create (rtx *, rtx_insn *, int); +extern struct df_insn_info * df_insn_create_insn_record (rtx_insn *); +extern void df_insn_delete (rtx_insn *); +extern void df_bb_refs_record (int, bool); +extern bool df_insn_rescan (rtx_insn *); +extern bool df_insn_rescan_debug_internal (rtx_insn *); +extern void df_insn_rescan_all (void); +extern void df_process_deferred_rescans (void); +extern void df_recompute_luids (basic_block); +extern void df_insn_change_bb (rtx_insn *, basic_block); +extern void df_maybe_reorganize_use_refs (enum df_ref_order); +extern void df_maybe_reorganize_def_refs (enum df_ref_order); +extern void df_ref_change_reg_with_loc (rtx, unsigned int); +extern void df_notes_rescan (rtx_insn *); +extern void df_hard_reg_init (void); +extern void df_update_entry_block_defs (void); +extern void df_update_exit_block_uses (void); +extern void df_update_entry_exit_and_calls (void); +extern bool 
df_hard_reg_used_p (unsigned int); +extern unsigned int df_hard_reg_used_count (unsigned int); +extern bool df_regs_ever_live_p (unsigned int); +extern bool df_epilogue_uses_p (unsigned int); +extern void df_set_regs_ever_live (unsigned int, bool); +extern void df_compute_regs_ever_live (bool); +extern void df_scan_verify (void); + + +/*---------------------------------------------------------------------------- + Public functions access functions for the dataflow problems. +----------------------------------------------------------------------------*/ + +static inline struct df_scan_bb_info * +df_scan_get_bb_info (unsigned int index) +{ + if (index < df_scan->block_info_size) + return &((struct df_scan_bb_info *) df_scan->block_info)[index]; + else + return NULL; +} + +static inline class df_rd_bb_info * +df_rd_get_bb_info (unsigned int index) +{ + if (index < df_rd->block_info_size) + return &((class df_rd_bb_info *) df_rd->block_info)[index]; + else + return NULL; +} + +static inline class df_lr_bb_info * +df_lr_get_bb_info (unsigned int index) +{ + if (index < df_lr->block_info_size) + return &((class df_lr_bb_info *) df_lr->block_info)[index]; + else + return NULL; +} + +static inline class df_md_bb_info * +df_md_get_bb_info (unsigned int index) +{ + if (index < df_md->block_info_size) + return &((class df_md_bb_info *) df_md->block_info)[index]; + else + return NULL; +} + +static inline class df_live_bb_info * +df_live_get_bb_info (unsigned int index) +{ + if (index < df_live->block_info_size) + return &((class df_live_bb_info *) df_live->block_info)[index]; + else + return NULL; +} + +static inline class df_word_lr_bb_info * +df_word_lr_get_bb_info (unsigned int index) +{ + if (index < df_word_lr->block_info_size) + return &((class df_word_lr_bb_info *) df_word_lr->block_info)[index]; + else + return NULL; +} + +static inline class df_mir_bb_info * +df_mir_get_bb_info (unsigned int index) +{ + if (index < df_mir->block_info_size) + return &((class 
df_mir_bb_info *) df_mir->block_info)[index]; + else + return NULL; +} + +/* Get the live at out set for BB no matter what problem happens to be + defined. This function is used by the register allocators who + choose different dataflow problems depending on the optimization + level. */ + +static inline bitmap +df_get_live_out (basic_block bb) +{ + gcc_checking_assert (df_lr); + + if (df_live) + return DF_LIVE_OUT (bb); + else + return DF_LR_OUT (bb); +} + +/* Get the live at in set for BB no matter what problem happens to be + defined. This function is used by the register allocators who + choose different dataflow problems depending on the optimization + level. */ + +static inline bitmap +df_get_live_in (basic_block bb) +{ + gcc_checking_assert (df_lr); + + if (df_live) + return DF_LIVE_IN (bb); + else + return DF_LR_IN (bb); +} + +/* Get basic block info. */ +/* Get the artificial defs for a basic block. */ + +static inline df_ref +df_get_artificial_defs (unsigned int bb_index) +{ + return df_scan_get_bb_info (bb_index)->artificial_defs; +} + + +/* Get the artificial uses for a basic block. */ + +static inline df_ref +df_get_artificial_uses (unsigned int bb_index) +{ + return df_scan_get_bb_info (bb_index)->artificial_uses; +} + +/* If INSN defines exactly one register, return the associated reference, + otherwise return null. */ + +static inline df_ref +df_single_def (const df_insn_info *info) +{ + df_ref defs = DF_INSN_INFO_DEFS (info); + return defs && !DF_REF_NEXT_LOC (defs) ? defs : NULL; +} + +/* If INSN uses exactly one register, return the associated reference, + otherwise return null. */ + +static inline df_ref +df_single_use (const df_insn_info *info) +{ + df_ref uses = DF_INSN_INFO_USES (info); + return uses && !DF_REF_NEXT_LOC (uses) ? uses : NULL; +} + +/* web */ + +struct web_entry_base +{ + private: + /* Reference to the parent in the union/find tree. */ + web_entry_base *pred_pvt; + + public: + /* Accessors. 
*/ + web_entry_base *pred () { return pred_pvt; } + void set_pred (web_entry_base *p) { pred_pvt = p; } + + /* Find representative in union-find tree. */ + web_entry_base *unionfind_root (); + + /* Union with another set, returning TRUE if they are already unioned. */ + friend bool unionfind_union (web_entry_base *first, web_entry_base *second); +}; + +#endif /* GCC_DF_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dfp.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dfp.h new file mode 100644 index 0000000..15667dd --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dfp.h @@ -0,0 +1,50 @@ +/* Decimal floating point support functions for GNU compiler. + Copyright (C) 2005-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DFP_H +#define GCC_DFP_H + +/* Encode REAL_VALUE_TYPEs into 32/64/128-bit IEEE 754 encoded values. */ +void encode_decimal32 (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); +void encode_decimal64 (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); +void decode_decimal128 (const struct real_format *, REAL_VALUE_TYPE *, const long *); + +/* Decode 32/64/128-bit IEEE 754 encoded values into REAL_VALUE_TYPEs. 
*/ +void decode_decimal32 (const struct real_format *, REAL_VALUE_TYPE *, const long *); +void decode_decimal64 (const struct real_format *, REAL_VALUE_TYPE *, const long *); +void encode_decimal128 (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); + +/* Arithmetic and conversion functions. */ +int decimal_do_compare (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int); +void decimal_real_from_string (REAL_VALUE_TYPE *, const char *); +void decimal_round_for_format (const struct real_format *, REAL_VALUE_TYPE *); +void decimal_real_convert (REAL_VALUE_TYPE *, const real_format *, + const REAL_VALUE_TYPE *); +void decimal_real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int); +void decimal_do_fix_trunc (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); +void decimal_real_maxval (REAL_VALUE_TYPE *, int, machine_mode); +wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int); +HOST_WIDE_INT decimal_real_to_integer (const REAL_VALUE_TYPE *); + +#ifdef TREE_CODE +bool decimal_real_arithmetic (REAL_VALUE_TYPE *, enum tree_code, const REAL_VALUE_TYPE *, + const REAL_VALUE_TYPE *); +#endif + +#endif /* GCC_DFP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-color.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-color.h new file mode 100644 index 0000000..f0b50c5 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-color.h @@ -0,0 +1,65 @@ +/* Copyright (C) 2013-2022 Free Software Foundation, Inc. + Contributed by Manuel Lopez-Ibanez + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Based on code from: */ +/* grep.c - main driver file for grep. + Copyright (C) 1992-2022 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA + 02110-1301, USA. + + Written July 1992 by Mike Haertel. */ + +#ifndef GCC_DIAGNOSTIC_COLOR_H +#define GCC_DIAGNOSTIC_COLOR_H + +/* Whether to add color to diagnostics: + o DIAGNOSTICS_COLOR_NO: never + o DIAGNOSTICS_COLOR_YES: always + o DIAGNOSTICS_COLOR_AUTO: depending on the output stream. */ +typedef enum +{ + DIAGNOSTICS_COLOR_NO = 0, + DIAGNOSTICS_COLOR_YES = 1, + DIAGNOSTICS_COLOR_AUTO = 2 +} diagnostic_color_rule_t; + +const char *colorize_start (bool, const char *, size_t); +const char *colorize_stop (bool); +bool colorize_init (diagnostic_color_rule_t); + +inline const char * +colorize_start (bool show_color, const char *name) +{ + return colorize_start (show_color, name, strlen (name)); +} + +#endif /* ! 
GCC_DIAGNOSTIC_COLOR_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-core.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-core.h new file mode 100644 index 0000000..286954a --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-core.h @@ -0,0 +1,128 @@ +/* Declarations of core diagnostic functionality for code that does + not need to deal with diagnostic contexts or diagnostic info + structures. + Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIAGNOSTIC_CORE_H +#define GCC_DIAGNOSTIC_CORE_H + +#include "bversion.h" + +/* Constants used to discriminate diagnostics. */ +typedef enum +{ +#define DEFINE_DIAGNOSTIC_KIND(K, msgid, C) K, +#include "diagnostic.def" +#undef DEFINE_DIAGNOSTIC_KIND + DK_LAST_DIAGNOSTIC_KIND, + /* This is used for tagging pragma pops in the diagnostic + classification history chain. */ + DK_POP +} diagnostic_t; + +/* RAII-style class for grouping related diagnostics. */ + +class auto_diagnostic_group +{ + public: + auto_diagnostic_group (); + ~auto_diagnostic_group (); +}; + +/* Forward decl. */ +class diagnostic_metadata; /* See diagnostic-metadata.h. */ + +extern const char *progname; + +extern const char *trim_filename (const char *); + +/* If we haven't already defined a front-end-specific diagnostics + style, use the generic one. 
*/ +#ifndef GCC_DIAG_STYLE +#define GCC_DIAG_STYLE __gcc_tdiag__ +#endif +/* None of these functions are suitable for ATTRIBUTE_PRINTF, because + each language front end can extend them with its own set of format + specifiers. We must use custom format checks. */ +#if (CHECKING_P && GCC_VERSION >= 4001) || GCC_VERSION == BUILDING_GCC_VERSION +#define ATTRIBUTE_GCC_DIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m, n))) ATTRIBUTE_NONNULL(m) +#else +#define ATTRIBUTE_GCC_DIAG(m, n) ATTRIBUTE_NONNULL(m) +#endif +extern void internal_error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2) + ATTRIBUTE_NORETURN; +extern void internal_error_no_backtrace (const char *, ...) + ATTRIBUTE_GCC_DIAG(1,2) ATTRIBUTE_NORETURN; +/* Pass one of the OPT_W* from options.h as the first parameter. */ +extern bool warning (int, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern bool warning_n (location_t, int, unsigned HOST_WIDE_INT, + const char *, const char *, ...) + ATTRIBUTE_GCC_DIAG(4,6) ATTRIBUTE_GCC_DIAG(5,6); +extern bool warning_n (rich_location *, int, unsigned HOST_WIDE_INT, + const char *, const char *, ...) + ATTRIBUTE_GCC_DIAG(4, 6) ATTRIBUTE_GCC_DIAG(5, 6); +extern bool warning_at (location_t, int, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool warning_at (rich_location *, int, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool warning_meta (rich_location *, + const diagnostic_metadata &, int, + const char *, ...) + ATTRIBUTE_GCC_DIAG(4,5); +extern void error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2); +extern void error_n (location_t, unsigned HOST_WIDE_INT, const char *, + const char *, ...) + ATTRIBUTE_GCC_DIAG(3,5) ATTRIBUTE_GCC_DIAG(4,5); +extern void error_at (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern void error_at (rich_location *, const char *, ...) + ATTRIBUTE_GCC_DIAG(2,3); +extern void fatal_error (location_t, const char *, ...) 
ATTRIBUTE_GCC_DIAG(2,3) + ATTRIBUTE_NORETURN; +/* Pass one of the OPT_W* from options.h as the second parameter. */ +extern bool pedwarn (location_t, int, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool pedwarn (rich_location *, int, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool permerror (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern bool permerror (rich_location *, const char *, + ...) ATTRIBUTE_GCC_DIAG(2,3); +extern void sorry (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2); +extern void sorry_at (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern void inform (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern void inform (rich_location *, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern void inform_n (location_t, unsigned HOST_WIDE_INT, const char *, + const char *, ...) + ATTRIBUTE_GCC_DIAG(3,5) ATTRIBUTE_GCC_DIAG(4,5); +extern void verbatim (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2); +extern bool emit_diagnostic (diagnostic_t, location_t, int, + const char *, ...) ATTRIBUTE_GCC_DIAG(4,5); +extern bool emit_diagnostic (diagnostic_t, rich_location *, int, + const char *, ...) ATTRIBUTE_GCC_DIAG(4,5); +extern bool emit_diagnostic_valist (diagnostic_t, location_t, int, const char *, + va_list *) ATTRIBUTE_GCC_DIAG (4,0); +extern bool seen_error (void); + +#ifdef BUFSIZ + /* N.B. Unlike all the others, fnotice is just gettext+fprintf, and + therefore it can have ATTRIBUTE_PRINTF. */ +extern void fnotice (FILE *, const char *, ...) + ATTRIBUTE_PRINTF_2; +#endif + +#endif /* ! GCC_DIAGNOSTIC_CORE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-event-id.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-event-id.h new file mode 100644 index 0000000..35c6697 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-event-id.h @@ -0,0 +1,61 @@ +/* A class for referring to events within a diagnostic_path. 
+ Copyright (C) 2019-2022 Free Software Foundation, Inc. + Contributed by David Malcolm + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIAGNOSTIC_EVENT_ID_H +#define GCC_DIAGNOSTIC_EVENT_ID_H + +/* A class for referring to events within a diagnostic_path. + + They are stored as 0-based offsets into the events, but + printed (e.g. via %@) as 1-based numbers. + + For example, a 3-event path has event offsets 0, 1, and 2, + which would be shown to the user as "(1)", "(2)" and "(3)". + + This has its own header so that pretty-print.cc can use this + to implement "%@" without bringing in all of diagnostic_path + (which e.g. refers to "tree"). */ + +class diagnostic_event_id_t +{ + public: + diagnostic_event_id_t () : m_index (UNKNOWN_EVENT_IDX) {} + diagnostic_event_id_t (int zero_based_idx) : m_index (zero_based_idx) {} + + bool known_p () const { return m_index != UNKNOWN_EVENT_IDX; } + + int one_based () const + { + gcc_assert (known_p ()); + return m_index + 1; + } + + private: + static const int UNKNOWN_EVENT_IDX = -1; + int m_index; // zero-based +}; + +/* A pointer to a diagnostic_event_id_t, for use with the "%@" format + code, which will print a 1-based representation for it, with suitable + colorization, e.g. "(1)". + The %@ format code requires that known_p be true for the event ID. */ +typedef diagnostic_event_id_t *diagnostic_event_id_ptr; + +#endif /* ! 
GCC_DIAGNOSTIC_EVENT_ID_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-metadata.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-metadata.h new file mode 100644 index 0000000..ae59942 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-metadata.h @@ -0,0 +1,42 @@ +/* Additional metadata for a diagnostic. + Copyright (C) 2019-2022 Free Software Foundation, Inc. + Contributed by David Malcolm + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIAGNOSTIC_METADATA_H +#define GCC_DIAGNOSTIC_METADATA_H + +/* A bundle of additional metadata that can be associated with a + diagnostic. + + Currently this only supports associating a CWE identifier with a + diagnostic. */ + +class diagnostic_metadata +{ + public: + diagnostic_metadata () : m_cwe (0) {} + + void add_cwe (int cwe) { m_cwe = cwe; } + int get_cwe () const { return m_cwe; } + + private: + int m_cwe; +}; + +#endif /* ! GCC_DIAGNOSTIC_METADATA_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-path.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-path.h new file mode 100644 index 0000000..3bd89d6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-path.h @@ -0,0 +1,149 @@ +/* Paths through the code associated with a diagnostic. + Copyright (C) 2019-2022 Free Software Foundation, Inc. 
+ Contributed by David Malcolm + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIAGNOSTIC_PATH_H +#define GCC_DIAGNOSTIC_PATH_H + +#include "diagnostic.h" /* for ATTRIBUTE_GCC_DIAG. */ +#include "diagnostic-event-id.h" + +/* A diagnostic_path is an optional additional piece of metadata associated + with a diagnostic (via its rich_location). + + It describes a sequence of events predicted by the compiler that + lead to the problem occurring, with their locations in the user's source, + and text descriptions. 
+ + For example, the following error has a 3-event path: + + test.c: In function 'demo': + test.c:29:5: error: passing NULL as argument 1 to 'PyList_Append' which + requires a non-NULL parameter + 29 | PyList_Append(list, item); + | ^~~~~~~~~~~~~~~~~~~~~~~~~ + 'demo': events 1-3 + | + | 25 | list = PyList_New(0); + | | ^~~~~~~~~~~~~ + | | | + | | (1) when 'PyList_New' fails, returning NULL + | 26 | + | 27 | for (i = 0; i < count; i++) { + | | ~~~ + | | | + | | (2) when 'i < count' + | 28 | item = PyLong_FromLong(random()); + | 29 | PyList_Append(list, item); + | | ~~~~~~~~~~~~~~~~~~~~~~~~~ + | | | + | | (3) when calling 'PyList_Append', passing NULL from (1) as argument 1 + | + + The diagnostic-printing code has consolidated the path into a single + run of events, since all the events are near each other and within the same + function; more complicated examples (such as interprocedural paths) + might be printed as multiple runs of events. */ + +/* Abstract base classes, describing events within a path, and the paths + themselves. */ + +/* One event within a diagnostic_path. */ + +class diagnostic_event +{ + public: + virtual ~diagnostic_event () {} + + virtual location_t get_location () const = 0; + + virtual tree get_fndecl () const = 0; + + /* Stack depth, so that consumers can visualizes the interprocedural + calls, returns, and frame nesting. */ + virtual int get_stack_depth () const = 0; + + /* Get a localized (and possibly colorized) description of this event. */ + virtual label_text get_desc (bool can_colorize) const = 0; +}; + +/* Abstract base class for getting at a sequence of events. */ + +class diagnostic_path +{ + public: + virtual ~diagnostic_path () {} + virtual unsigned num_events () const = 0; + virtual const diagnostic_event & get_event (int idx) const = 0; + + bool interprocedural_p () const; +}; + +/* Concrete subclasses. */ + +/* A simple implementation of diagnostic_event. 
*/ + +class simple_diagnostic_event : public diagnostic_event +{ + public: + simple_diagnostic_event (location_t loc, tree fndecl, int depth, + const char *desc); + ~simple_diagnostic_event (); + + location_t get_location () const FINAL OVERRIDE { return m_loc; } + tree get_fndecl () const FINAL OVERRIDE { return m_fndecl; } + int get_stack_depth () const FINAL OVERRIDE { return m_depth; } + label_text get_desc (bool) const FINAL OVERRIDE + { + return label_text::borrow (m_desc); + } + + private: + location_t m_loc; + tree m_fndecl; + int m_depth; + char *m_desc; // has been i18n-ed and formatted +}; + +/* A simple implementation of diagnostic_path, as a vector of + simple_diagnostic_event instances. */ + +class simple_diagnostic_path : public diagnostic_path +{ + public: + simple_diagnostic_path (pretty_printer *event_pp) + : m_event_pp (event_pp) {} + + unsigned num_events () const FINAL OVERRIDE; + const diagnostic_event & get_event (int idx) const FINAL OVERRIDE; + + diagnostic_event_id_t add_event (location_t loc, tree fndecl, int depth, + const char *fmt, ...) + ATTRIBUTE_GCC_DIAG(5,6); + + private: + auto_delete_vec m_events; + + /* (for use by add_event). */ + pretty_printer *m_event_pp; +}; + +extern void debug (diagnostic_path *path); + +#endif /* ! GCC_DIAGNOSTIC_PATH_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-spec.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-spec.h new file mode 100644 index 0000000..28e5e5c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-spec.h @@ -0,0 +1,140 @@ +/* Language-independent APIs to enable/disable per-location warnings. + + Copyright (C) 2021-2022 Free Software Foundation, Inc. + Contributed by Martin Sebor + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#ifndef DIAGNOSTIC_SPEC_H_INCLUDED +#define DIAGNOSTIC_SPEC_H_INCLUDED + +#include "hash-map.h" + +/* A "bitset" of warning groups. */ + +class nowarn_spec_t +{ +public: + enum + { + /* Middle end warnings about invalid accesses. */ + NW_ACCESS = 1 << 0, + /* Front end/lexical warnings. */ + NW_LEXICAL = 1 << 1, + /* Warnings about null pointers. */ + NW_NONNULL = 1 << 2, + /* Warnings about uninitialized reads. */ + NW_UNINIT = 1 << 3, + /* Warnings about arithmetic overflow. */ + NW_VFLOW = 1 << 4, + /* Warnings about dangling pointers. */ + NW_DANGLING = 1 << 5, + /* All other unclassified warnings. */ + NW_OTHER = 1 << 6, + /* All groups of warnings. */ + NW_ALL = (NW_ACCESS | NW_LEXICAL | NW_NONNULL + | NW_UNINIT | NW_VFLOW | NW_DANGLING | NW_OTHER) + }; + + nowarn_spec_t (): m_bits () { } + + nowarn_spec_t (opt_code); + + /* Return the raw bitset. */ + operator unsigned() const + { + return m_bits; + } + + /* Return true if the bitset is clear. */ + bool operator!() const + { + return !m_bits; + } + + /* Return the inverse of the bitset. */ + nowarn_spec_t operator~() const + { + nowarn_spec_t res (*this); + res.m_bits &= ~NW_ALL; + return res; + } + + /* Set *THIS to the bitwise OR of *THIS and RHS. */ + nowarn_spec_t& operator|= (const nowarn_spec_t &rhs) + { + m_bits |= rhs.m_bits; + return *this; + } + + /* Set *THIS to the bitwise AND of *THIS and RHS. 
*/ + nowarn_spec_t& operator&= (const nowarn_spec_t &rhs) + { + m_bits &= rhs.m_bits; + return *this; + } + + /* Set *THIS to the bitwise exclusive OR of *THIS and RHS. */ + nowarn_spec_t& operator^= (const nowarn_spec_t &rhs) + { + m_bits ^= rhs.m_bits; + return *this; + } + +private: + /* Bitset of warning groups. */ + unsigned m_bits; +}; + +/* Return the bitwise OR of LHS and RHS. */ + +inline nowarn_spec_t +operator| (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs) +{ + return nowarn_spec_t (lhs) |= rhs; +} + +/* Return the bitwise AND of LHS and RHS. */ + +inline nowarn_spec_t +operator& (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs) +{ + return nowarn_spec_t (lhs) &= rhs; +} + +/* Return true if LHS is equal RHS. */ + +inline bool +operator== (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs) +{ + return static_cast(lhs) == static_cast(rhs); +} + +/* Return true if LHS is not equal RHS. */ + +inline bool +operator!= (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs) +{ + return !(lhs == rhs); +} + +typedef hash_map nowarn_map_t; + +/* A mapping from a 'location_t' to the warning spec set for it. */ +extern GTY(()) nowarn_map_t *nowarn_map; + +#endif // DIAGNOSTIC_SPEC_H_INCLUDED diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-url.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-url.h new file mode 100644 index 0000000..f349f9e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic-url.h @@ -0,0 +1,52 @@ +/* Copyright (C) 2019-2022 Free Software Foundation, Inc. + Contributed by David Malcolm . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIAGNOSTIC_URL_H +#define GCC_DIAGNOSTIC_URL_H + +/* Whether to add URLs to diagnostics: + - DIAGNOSTICS_URL_NO: never + - DIAGNOSTICS_URL_YES: always + - DIAGNOSTICS_URL_AUTO: depending on the output stream. */ +typedef enum +{ + DIAGNOSTICS_URL_NO = 0, + DIAGNOSTICS_URL_YES = 1, + DIAGNOSTICS_URL_AUTO = 2 +} diagnostic_url_rule_t; + +/* Tells whether URLs should be emitted, and, if so, how to + terminate strings within the escape sequence. */ +enum diagnostic_url_format +{ + /* No URLs shall be emitted. */ + URL_FORMAT_NONE, + + /* Use ST string termination. */ + URL_FORMAT_ST, + + /* Use BEL string termination. */ + URL_FORMAT_BEL +}; + +const diagnostic_url_format URL_FORMAT_DEFAULT = URL_FORMAT_BEL; + +extern diagnostic_url_format determine_url_format (diagnostic_url_rule_t); + +#endif /* ! GCC_DIAGNOSTIC_URL_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic.def new file mode 100644 index 0000000..5020811 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic.def @@ -0,0 +1,55 @@ +/* Copyright (C) 2001-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* DK_UNSPECIFIED must be first so it has a value of zero. We never + assign this kind to an actual diagnostic, we only use this in + variables that can hold a kind, to mean they have yet to have a + kind specified. I.e. they're uninitialized. Within the diagnostic + machinery, this kind also means "don't change the existing kind", + meaning "no change is specified". */ +DEFINE_DIAGNOSTIC_KIND (DK_UNSPECIFIED, "", NULL) + +/* If a diagnostic is set to DK_IGNORED, it won't get reported at all. + This is used by the diagnostic machinery when it wants to disable a + diagnostic without disabling the option which causes it. */ +DEFINE_DIAGNOSTIC_KIND (DK_IGNORED, "", NULL) + +/* The remainder are real diagnostic types. */ +DEFINE_DIAGNOSTIC_KIND (DK_FATAL, "fatal error: ", "error") +DEFINE_DIAGNOSTIC_KIND (DK_ICE, "internal compiler error: ", "error") +DEFINE_DIAGNOSTIC_KIND (DK_ERROR, "error: ", "error") +DEFINE_DIAGNOSTIC_KIND (DK_SORRY, "sorry, unimplemented: ", "error") +DEFINE_DIAGNOSTIC_KIND (DK_WARNING, "warning: ", "warning") +DEFINE_DIAGNOSTIC_KIND (DK_ANACHRONISM, "anachronism: ", "warning") +DEFINE_DIAGNOSTIC_KIND (DK_NOTE, "note: ", "note") +DEFINE_DIAGNOSTIC_KIND (DK_DEBUG, "debug: ", "note") + +/* For use when using the diagnostic_show_locus machinery to show + a range of events within a path. */ +DEFINE_DIAGNOSTIC_KIND (DK_DIAGNOSTIC_PATH, "path: ", "path") + +/* These two would be re-classified as DK_WARNING or DK_ERROR, so the +prefix does not matter. */ +DEFINE_DIAGNOSTIC_KIND (DK_PEDWARN, "pedwarn: ", NULL) +DEFINE_DIAGNOSTIC_KIND (DK_PERMERROR, "permerror: ", NULL) +/* This one is just for counting DK_WARNING promoted to DK_ERROR + due to -Werror and -Werror=warning. 
*/ +DEFINE_DIAGNOSTIC_KIND (DK_WERROR, "error: ", NULL) +/* This is like DK_ICE, but backtrace is not printed. Used in the driver + when reporting fatal signal in the compiler. */ +DEFINE_DIAGNOSTIC_KIND (DK_ICE_NOBT, "internal compiler error: ", "error") diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic.h new file mode 100644 index 0000000..3ca3297 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/diagnostic.h @@ -0,0 +1,590 @@ +/* Various declarations for language-independent diagnostics subroutines. + Copyright (C) 2000-2022 Free Software Foundation, Inc. + Contributed by Gabriel Dos Reis + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIAGNOSTIC_H +#define GCC_DIAGNOSTIC_H + +#include "pretty-print.h" +#include "diagnostic-core.h" + +/* An enum for controlling what units to use for the column number + when diagnostics are output, used by the -fdiagnostics-column-unit option. + Tabs will be expanded or not according to the value of -ftabstop. The origin + (default 1) is controlled by -fdiagnostics-column-origin. */ + +enum diagnostics_column_unit +{ + /* The default from GCC 11 onwards: display columns. */ + DIAGNOSTICS_COLUMN_UNIT_DISPLAY, + + /* The behavior in GCC 10 and earlier: simple bytes. 
*/ + DIAGNOSTICS_COLUMN_UNIT_BYTE +}; + +/* An enum for controlling how to print non-ASCII characters/bytes when + a diagnostic suggests escaping the source code on output. */ + +enum diagnostics_escape_format +{ + /* Escape non-ASCII Unicode characters in the form and + non-UTF-8 bytes in the form . */ + DIAGNOSTICS_ESCAPE_FORMAT_UNICODE, + + /* Escape non-ASCII bytes in the form (thus showing the underlying + encoding of non-ASCII Unicode characters). */ + DIAGNOSTICS_ESCAPE_FORMAT_BYTES +}; + +/* Enum for overriding the standard output format. */ + +enum diagnostics_output_format +{ + /* The default: textual output. */ + DIAGNOSTICS_OUTPUT_FORMAT_TEXT, + + /* JSON-based output. */ + DIAGNOSTICS_OUTPUT_FORMAT_JSON +}; + +/* An enum for controlling how diagnostic_paths should be printed. */ +enum diagnostic_path_format +{ + /* Don't print diagnostic_paths. */ + DPF_NONE, + + /* Print diagnostic_paths by emitting a separate "note" for every event + in the path. */ + DPF_SEPARATE_EVENTS, + + /* Print diagnostic_paths by consolidating events together where they + are close enough, and printing such runs of events with multiple + calls to diagnostic_show_locus, showing the individual events in + each run via labels in the source. */ + DPF_INLINE_EVENTS +}; + +/* An enum for capturing values of GCC_EXTRA_DIAGNOSTIC_OUTPUT, + and for -fdiagnostics-parseable-fixits. */ + +enum diagnostics_extra_output_kind +{ + /* No extra output, or an unrecognized value. */ + EXTRA_DIAGNOSTIC_OUTPUT_none, + + /* Emit fix-it hints using the "fixits-v1" format, equivalent to + -fdiagnostics-parseable-fixits. */ + EXTRA_DIAGNOSTIC_OUTPUT_fixits_v1, + + /* Emit fix-it hints using the "fixits-v2" format. */ + EXTRA_DIAGNOSTIC_OUTPUT_fixits_v2 +}; + +/* A diagnostic is described by the MESSAGE to send, the FILE and LINE of + its context and its KIND (ice, error, warning, note, ...) See complete + list in diagnostic.def. 
*/ +struct diagnostic_info +{ + diagnostic_info () + : message (), richloc (), metadata (), x_data (), kind (), option_index (), + m_iinfo () + { } + + /* Text to be formatted. */ + text_info message; + + /* The location at which the diagnostic is to be reported. */ + rich_location *richloc; + + /* An optional bundle of metadata associated with the diagnostic + (or NULL). */ + const diagnostic_metadata *metadata; + + /* Auxiliary data for client. */ + void *x_data; + /* The kind of diagnostic it is about. */ + diagnostic_t kind; + /* Which OPT_* directly controls this diagnostic. */ + int option_index; + + /* Inlining context containing locations for each call site along + the inlining stack. */ + struct inlining_info + { + /* Locations along the inlining stack. */ + auto_vec m_ilocs; + /* The abstract origin of the location. */ + void *m_ao; + /* Set if every M_ILOCS element is in a system header. */ + bool m_allsyslocs; + } m_iinfo; +}; + +/* Each time a diagnostic's classification is changed with a pragma, + we record the change and the location of the change in an array of + these structs. */ +struct diagnostic_classification_change_t +{ + location_t location; + int option; + diagnostic_t kind; +}; + +/* Forward declarations. */ +typedef void (*diagnostic_starter_fn) (diagnostic_context *, + diagnostic_info *); + +typedef void (*diagnostic_start_span_fn) (diagnostic_context *, + expanded_location); + +typedef void (*diagnostic_finalizer_fn) (diagnostic_context *, + diagnostic_info *, + diagnostic_t); + +class edit_context; +namespace json { class value; } + +/* This data structure bundles altogether any information relevant to + the context of a diagnostic message. */ +struct diagnostic_context +{ + /* Where most of the diagnostic formatting work is done. */ + pretty_printer *printer; + + /* Cache of source code. */ + file_cache *m_file_cache; + + /* The number of times we have issued diagnostics. 
*/ + int diagnostic_count[DK_LAST_DIAGNOSTIC_KIND]; + + /* True if it has been requested that warnings be treated as errors. */ + bool warning_as_error_requested; + + /* The number of option indexes that can be passed to warning() et + al. */ + int n_opts; + + /* For each option index that can be passed to warning() et al + (OPT_* from options.h when using this code with the core GCC + options), this array may contain a new kind that the diagnostic + should be changed to before reporting, or DK_UNSPECIFIED to leave + it as the reported kind, or DK_IGNORED to not report it at + all. */ + diagnostic_t *classify_diagnostic; + + /* History of all changes to the classifications above. This list + is stored in location-order, so we can search it, either + binary-wise or end-to-front, to find the most recent + classification for a given diagnostic, given the location of the + diagnostic. */ + diagnostic_classification_change_t *classification_history; + + /* The size of the above array. */ + int n_classification_history; + + /* For pragma push/pop. */ + int *push_list; + int n_push; + + /* True if we should print the source line with a caret indicating + the location. */ + bool show_caret; + + /* Maximum width of the source line printed. */ + int caret_max_width; + + /* Character used for caret diagnostics. */ + char caret_chars[rich_location::STATICALLY_ALLOCATED_RANGES]; + + /* True if we should print any CWE identifiers associated with + diagnostics. */ + bool show_cwe; + + /* How should diagnostic_path objects be printed. */ + enum diagnostic_path_format path_format; + + /* True if we should print stack depths when printing diagnostic paths. */ + bool show_path_depths; + + /* True if we should print the command line option which controls + each diagnostic, if known. */ + bool show_option_requested; + + /* True if we should raise a SIGABRT on errors. */ + bool abort_on_error; + + /* True if we should show the column number on diagnostics. 
*/ + bool show_column; + + /* True if pedwarns are errors. */ + bool pedantic_errors; + + /* True if permerrors are warnings. */ + bool permissive; + + /* The index of the option to associate with turning permerrors into + warnings. */ + int opt_permissive; + + /* True if errors are fatal. */ + bool fatal_errors; + + /* True if all warnings should be disabled. */ + bool dc_inhibit_warnings; + + /* True if warnings should be given in system headers. */ + bool dc_warn_system_headers; + + /* Maximum number of errors to report. */ + int max_errors; + + /* This function is called before any message is printed out. It is + responsible for preparing message prefix and such. For example, it + might say: + In file included from "/usr/local/include/curses.h:5: + from "/home/gdr/src/nifty_printer.h:56: + ... + */ + diagnostic_starter_fn begin_diagnostic; + + /* This function is called by diagnostic_show_locus in between + disjoint spans of source code, so that the context can print + something to indicate that a new span of source code has begun. */ + diagnostic_start_span_fn start_span; + + /* This function is called after the diagnostic message is printed. */ + diagnostic_finalizer_fn end_diagnostic; + + /* Client hook to report an internal error. */ + void (*internal_error) (diagnostic_context *, const char *, va_list *); + + /* Client hook to say whether the option controlling a diagnostic is + enabled. Returns nonzero if enabled, zero if disabled. */ + int (*option_enabled) (int, unsigned, void *); + + /* Client information to pass as second argument to + option_enabled. */ + void *option_state; + + /* Client hook to return the name of an option that controls a + diagnostic. Returns malloced memory. The first diagnostic_t + argument is the kind of diagnostic before any reclassification + (of warnings as errors, etc.); the second is the kind after any + reclassification. May return NULL if no name is to be printed. 
+ May be passed 0 as well as the index of a particular option. */ + char *(*option_name) (diagnostic_context *, int, diagnostic_t, diagnostic_t); + + /* Client hook to return a URL describing the option that controls + a diagnostic. Returns malloced memory. May return NULL if no URL + is available. May be passed 0 as well as the index of a + particular option. */ + char *(*get_option_url) (diagnostic_context *, int); + + void (*print_path) (diagnostic_context *, const diagnostic_path *); + json::value *(*make_json_for_path) (diagnostic_context *, const diagnostic_path *); + + /* Auxiliary data for client. */ + void *x_data; + + /* Used to detect that the last caret was printed at the same location. */ + location_t last_location; + + /* Used to detect when the input file stack has changed since last + described. */ + const line_map_ordinary *last_module; + + int lock; + + /* A copy of lang_hooks.option_lang_mask (). */ + unsigned lang_mask; + + bool inhibit_notes_p; + + /* When printing source code, should the characters at carets and ranges + be colorized? (assuming colorization is on at all). + This should be true for frontends that generate range information + (so that the ranges of code are colorized), + and false for frontends that merely specify points within the + source code (to avoid e.g. colorizing just the first character in + a token, which would look strange). */ + bool colorize_source_p; + + /* When printing source code, should labelled ranges be printed? */ + bool show_labels_p; + + /* When printing source code, should there be a left-hand margin + showing line numbers? */ + bool show_line_numbers_p; + + /* If printing source code, what should the minimum width of the margin + be? Line numbers will be right-aligned, and padded to this width. */ + int min_margin_width; + + /* Usable by plugins; if true, print a debugging ruler above the + source output. */ + bool show_ruler_p; + + /* True if -freport-bug option is used. 
*/ + bool report_bug; + + /* Used to specify additional diagnostic output to be emitted after the + rest of the diagnostic. This is for implementing + -fdiagnostics-parseable-fixits and GCC_EXTRA_DIAGNOSTIC_OUTPUT. */ + enum diagnostics_extra_output_kind extra_output_kind; + + /* What units to use when outputting the column number. */ + enum diagnostics_column_unit column_unit; + + /* The origin for the column number (1-based or 0-based typically). */ + int column_origin; + + /* The size of the tabstop for tab expansion. */ + int tabstop; + + /* How should non-ASCII/non-printable bytes be escaped when + a diagnostic suggests escaping the source code on output. */ + enum diagnostics_escape_format escape_format; + + /* If non-NULL, an edit_context to which fix-it hints should be + applied, for generating patches. */ + edit_context *edit_context_ptr; + + /* How many diagnostic_group instances are currently alive. */ + int diagnostic_group_nesting_depth; + + /* How many diagnostics have been emitted since the bottommost + diagnostic_group was pushed. */ + int diagnostic_group_emission_count; + + /* Optional callbacks for handling diagnostic groups. */ + + /* If non-NULL, this will be called immediately before the first + time a diagnostic is emitted within a stack of groups. */ + void (*begin_group_cb) (diagnostic_context * context); + + /* If non-NULL, this will be called when a stack of groups is + popped if any diagnostics were emitted within that group. */ + void (*end_group_cb) (diagnostic_context * context); + + /* Callback for final cleanup. */ + void (*final_cb) (diagnostic_context *context); + + /* Callback to set the locations of call sites along the inlining + stack corresponding to a diagnostic location. Needed to traverse + the BLOCK_SUPERCONTEXT() chain hanging off the LOCATION_BLOCK() + of a diagnostic's location. 
*/ + void (*set_locations_cb)(diagnostic_context *, diagnostic_info *); + + /* Include files that diagnostic_report_current_module has already listed the + include path for. */ + hash_set *includes_seen; +}; + +static inline void +diagnostic_inhibit_notes (diagnostic_context * context) +{ + context->inhibit_notes_p = true; +} + + +/* Client supplied function to announce a diagnostic. */ +#define diagnostic_starter(DC) (DC)->begin_diagnostic + +/* Client supplied function called after a diagnostic message is + displayed. */ +#define diagnostic_finalizer(DC) (DC)->end_diagnostic + +/* Extension hooks for client. */ +#define diagnostic_context_auxiliary_data(DC) (DC)->x_data +#define diagnostic_info_auxiliary_data(DI) (DI)->x_data + +/* Same as pp_format_decoder. Works on 'diagnostic_context *'. */ +#define diagnostic_format_decoder(DC) ((DC)->printer->format_decoder) + +/* Same as output_prefixing_rule. Works on 'diagnostic_context *'. */ +#define diagnostic_prefixing_rule(DC) ((DC)->printer->wrapping.rule) + +/* Raise SIGABRT on any diagnostic of severity DK_ERROR or higher. */ +#define diagnostic_abort_on_error(DC) \ + (DC)->abort_on_error = true + +/* This diagnostic_context is used by front-ends that directly output + diagnostic messages without going through `error', `warning', + and similar functions. */ +extern diagnostic_context *global_dc; + +/* Returns whether the diagnostic framework has been intialized already and is + ready for use. */ +#define diagnostic_ready_p() (global_dc->printer != NULL) + +/* The total count of a KIND of diagnostics emitted so far. */ +#define diagnostic_kind_count(DC, DK) (DC)->diagnostic_count[(int) (DK)] + +/* The number of errors that have been issued so far. Ideally, these + would take a diagnostic_context as an argument. */ +#define errorcount diagnostic_kind_count (global_dc, DK_ERROR) +/* Similarly, but for warnings. 
*/ +#define warningcount diagnostic_kind_count (global_dc, DK_WARNING) +/* Similarly, but for warnings promoted to errors. */ +#define werrorcount diagnostic_kind_count (global_dc, DK_WERROR) +/* Similarly, but for sorrys. */ +#define sorrycount diagnostic_kind_count (global_dc, DK_SORRY) + +/* Returns nonzero if warnings should be emitted. */ +#define diagnostic_report_warnings_p(DC, LOC) \ + (!(DC)->dc_inhibit_warnings \ + && !(in_system_header_at (LOC) && !(DC)->dc_warn_system_headers)) + +/* Override the option index to be used for reporting a + diagnostic. */ + +static inline void +diagnostic_override_option_index (diagnostic_info *info, int optidx) +{ + info->option_index = optidx; +} + +/* Diagnostic related functions. */ +extern void diagnostic_initialize (diagnostic_context *, int); +extern void diagnostic_color_init (diagnostic_context *, int value = -1); +extern void diagnostic_urls_init (diagnostic_context *, int value = -1); +extern void diagnostic_finish (diagnostic_context *); +extern void diagnostic_report_current_module (diagnostic_context *, location_t); +extern void diagnostic_show_locus (diagnostic_context *, + rich_location *richloc, + diagnostic_t diagnostic_kind); +extern void diagnostic_show_any_path (diagnostic_context *, diagnostic_info *); + +/* Because we read source files a second time after the frontend did it the + first time, we need to know how the frontend handled things like character + set conversion and UTF-8 BOM stripping, in order to make everything + consistent. This function needs to be called by each frontend that requires + non-default behavior, to inform the diagnostics infrastructure how input is + to be processed. The default behavior is to do no conversion and not to + strip a UTF-8 BOM. + + The callback should return the input charset to be used to convert the given + file's contents to UTF-8, or it should return NULL if no conversion is needed + for this file. 
SHOULD_SKIP_BOM only applies in case no conversion was + performed, and if true, it will cause a UTF-8 BOM to be skipped at the + beginning of the file. (In case a conversion was performed, the BOM is + rather skipped as part of the conversion process.) */ + +void diagnostic_initialize_input_context (diagnostic_context *context, + diagnostic_input_charset_callback ccb, + bool should_skip_bom); + +/* Force diagnostics controlled by OPTIDX to be kind KIND. */ +extern diagnostic_t diagnostic_classify_diagnostic (diagnostic_context *, + int /* optidx */, + diagnostic_t /* kind */, + location_t); +extern void diagnostic_push_diagnostics (diagnostic_context *, location_t); +extern void diagnostic_pop_diagnostics (diagnostic_context *, location_t); +extern bool diagnostic_report_diagnostic (diagnostic_context *, + diagnostic_info *); +#ifdef ATTRIBUTE_GCC_DIAG +extern void diagnostic_set_info (diagnostic_info *, const char *, va_list *, + rich_location *, diagnostic_t) ATTRIBUTE_GCC_DIAG(2,0); +extern void diagnostic_set_info_translated (diagnostic_info *, const char *, + va_list *, rich_location *, + diagnostic_t) + ATTRIBUTE_GCC_DIAG(2,0); +extern void diagnostic_append_note (diagnostic_context *, location_t, + const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); +#endif +extern char *diagnostic_build_prefix (diagnostic_context *, const diagnostic_info *); +void default_diagnostic_starter (diagnostic_context *, diagnostic_info *); +void default_diagnostic_start_span_fn (diagnostic_context *, + expanded_location); +void default_diagnostic_finalizer (diagnostic_context *, diagnostic_info *, + diagnostic_t); +void diagnostic_set_caret_max_width (diagnostic_context *context, int value); +void diagnostic_action_after_output (diagnostic_context *, diagnostic_t); +void diagnostic_check_max_errors (diagnostic_context *, bool flush = false); + +void diagnostic_file_cache_fini (void); + +int get_terminal_width (void); + +/* Return the location associated to this diagnostic. 
Parameter WHICH + specifies which location. By default, expand the first one. */ + +static inline location_t +diagnostic_location (const diagnostic_info * diagnostic, int which = 0) +{ + return diagnostic->message.get_location (which); +} + +/* Return the number of locations to be printed in DIAGNOSTIC. */ + +static inline unsigned int +diagnostic_num_locations (const diagnostic_info * diagnostic) +{ + return diagnostic->message.m_richloc->get_num_locations (); +} + +/* Expand the location of this diagnostic. Use this function for + consistency. Parameter WHICH specifies which location. By default, + expand the first one. */ + +static inline expanded_location +diagnostic_expand_location (const diagnostic_info * diagnostic, int which = 0) +{ + return diagnostic->richloc->get_expanded_location (which); +} + +/* This is somehow the right-side margin of a caret line, that is, we + print at least these many characters after the position pointed at + by the caret. */ +const int CARET_LINE_MARGIN = 10; + +/* Return true if the two locations can be represented within the same + caret line. This is used to build a prefix and also to determine + whether to print one or two caret lines. */ + +static inline bool +diagnostic_same_line (const diagnostic_context *context, + expanded_location s1, expanded_location s2) +{ + return s2.column && s1.line == s2.line + && context->caret_max_width - CARET_LINE_MARGIN > abs (s1.column - s2.column); +} + +extern const char *diagnostic_get_color_for_kind (diagnostic_t kind); +extern int diagnostic_converted_column (diagnostic_context *context, + expanded_location s); + +/* Pure text formatting support functions. */ +extern char *file_name_as_prefix (diagnostic_context *, const char *); + +extern char *build_message_string (const char *, ...) ATTRIBUTE_PRINTF_1; + +extern void diagnostic_output_format_init (diagnostic_context *, + enum diagnostics_output_format); + +/* Compute the number of digits in the decimal representation of an integer. 
*/ +extern int num_digits (int); + +extern json::value *json_from_expanded_location (diagnostic_context *context, + location_t loc); + +extern bool warning_enabled_at (location_t, int); + +#endif /* ! GCC_DIAGNOSTIC_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/digraph.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/digraph.h new file mode 100644 index 0000000..df1b870 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/digraph.h @@ -0,0 +1,246 @@ +/* Template classes for directed graphs. + Copyright (C) 2019-2022 Free Software Foundation, Inc. + Contributed by David Malcolm . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DIGRAPH_H +#define GCC_DIGRAPH_H + +#include "diagnostic.h" +#include "tree-diagnostic.h" /* for default_tree_printer. */ +#include "graphviz.h" + +/* Templates for a family of classes: digraph, node, edge, and cluster. + This assumes a traits type with the following typedefs: + node_t: the node class + edge_t: the edge class + dump_args_t: additional args for dot-dumps + cluster_t: the cluster class (for use when generating .dot files). + + Using a template allows for typesafe nodes and edges: a node's + predecessor and successor edges can be of a node-specific edge + subclass, without needing casting. */ + +/* Abstract base class for a node in a directed graph. 
*/ + +template +class dnode +{ + public: + typedef typename GraphTraits::edge_t edge_t; + typedef typename GraphTraits::dump_args_t dump_args_t; + + virtual ~dnode () {} + virtual void dump_dot (graphviz_out *gv, const dump_args_t &args) const = 0; + + auto_vec m_preds; + auto_vec m_succs; +}; + +/* Abstract base class for an edge in a directed graph. */ + +template +class dedge +{ + public: + typedef typename GraphTraits::node_t node_t; + typedef typename GraphTraits::dump_args_t dump_args_t; + + dedge (node_t *src, node_t *dest) + : m_src (src), m_dest (dest) {} + + virtual ~dedge () {} + + virtual void dump_dot (graphviz_out *gv, const dump_args_t &args) const = 0; + + node_t *const m_src; + node_t *const m_dest; +}; + +/* Abstract base class for a directed graph. + This class maintains the vectors of nodes and edges, + and owns the nodes and edges. */ + +template +class digraph +{ + public: + typedef typename GraphTraits::node_t node_t; + typedef typename GraphTraits::edge_t edge_t; + typedef typename GraphTraits::dump_args_t dump_args_t; + typedef typename GraphTraits::cluster_t cluster_t; + + digraph () {} + virtual ~digraph () {} + + void dump_dot_to_pp (pretty_printer *pp, + cluster_t *root_cluster, + const dump_args_t &args) const; + void dump_dot_to_file (FILE *fp, + cluster_t *root_cluster, + const dump_args_t &args) const; + void dump_dot (const char *path, + cluster_t *root_cluster, + const dump_args_t &args) const; + + void add_node (node_t *node); + void add_edge (edge_t *edge); + + auto_delete_vec m_nodes; + auto_delete_vec m_edges; +}; + +/* Abstract base class for splitting dnodes into hierarchical clusters + in the generated .dot file. + + See "Subgraphs and Clusters" within + https://www.graphviz.org/doc/info/lang.html + and e.g. + https://graphviz.gitlab.io/_pages/Gallery/directed/cluster.html + + If a root_cluster is passed to dump_dot*, then all nodes will be + added to it at the start of dumping, via calls to add_node. 
+ + The root cluster can organize the nodes into a hierarchy of + child clusters. + + After all nodes are added to the root cluster, dump_dot will then + be called on it (and not on the nodes themselves). */ + +template +class cluster +{ + public: + typedef typename GraphTraits::node_t node_t; + typedef typename GraphTraits::dump_args_t dump_args_t; + + virtual ~cluster () {} + + virtual void add_node (node_t *node) = 0; + + /* Recursively dump the cluster, all nodes, and child clusters. */ + virtual void dump_dot (graphviz_out *gv, const dump_args_t &) const = 0; +}; + +/* Write .dot information for this graph to PP, passing ARGS to the nodes + and edges. + If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */ + +template +inline void +digraph::dump_dot_to_pp (pretty_printer *pp, + cluster_t *root_cluster, + const dump_args_t &args) const +{ + graphviz_out gv (pp); + + pp_string (pp, "digraph \""); + pp_string (pp, "base"); + pp_string (pp, "\" {\n"); + + gv.indent (); + + pp_string (pp, "overlap=false;\n"); + pp_string (pp, "compound=true;\n"); + + /* If using clustering, emit all nodes via clusters. */ + if (root_cluster) + { + int i; + node_t *n; + FOR_EACH_VEC_ELT (m_nodes, i, n) + root_cluster->add_node (n); + root_cluster->dump_dot (&gv, args); + } + else + { + /* Otherwise, display all nodes at top level. */ + int i; + node_t *n; + FOR_EACH_VEC_ELT (m_nodes, i, n) + n->dump_dot (&gv, args); + } + + /* Edges. */ + int i; + edge_t *e; + FOR_EACH_VEC_ELT (m_edges, i, e) + e->dump_dot (&gv, args); + + /* Terminate "digraph" */ + gv.outdent (); + pp_string (pp, "}"); + pp_newline (pp); +} + +/* Write .dot information for this graph to FP, passing ARGS to the nodes + and edges. + If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. 
*/ + +template +inline void +digraph::dump_dot_to_file (FILE *fp, + cluster_t *root_cluster, + const dump_args_t &args) const +{ + pretty_printer pp; + // TODO: + pp_format_decoder (&pp) = default_tree_printer; + pp.buffer->stream = fp; + dump_dot_to_pp (&pp, root_cluster, args); + pp_flush (&pp); +} + +/* Write .dot information for this graph to a file at PATH, passing ARGS + to the nodes and edges. + If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */ + +template +inline void +digraph::dump_dot (const char *path, + cluster_t *root_cluster, + const dump_args_t &args) const +{ + FILE *fp = fopen (path, "w"); + dump_dot_to_file (fp, root_cluster, args); + fclose (fp); +} + +/* Add NODE to this DIGRAPH, taking ownership. */ + +template +inline void +digraph::add_node (node_t *node) +{ + m_nodes.safe_push (node); +} + +/* Add EDGE to this digraph, and to the preds/succs of its endpoints. + Take ownership of EDGE. */ + +template +inline void +digraph::add_edge (edge_t *edge) +{ + m_edges.safe_push (edge); + edge->m_dest->m_preds.safe_push (edge); + edge->m_src->m_succs.safe_push (edge); + +} + +#endif /* GCC_DIGRAPH_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dojump.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dojump.h new file mode 100644 index 0000000..e379cce --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dojump.h @@ -0,0 +1,78 @@ +/* Export function prototypes from dojump.cc. + Copyright (C) 2015-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DOJUMP_H +#define GCC_DOJUMP_H + +/* At the start of a function, record that we have no previously-pushed + arguments waiting to be popped. */ +extern void init_pending_stack_adjust (void); + +/* Discard any pending stack adjustment. */ +extern void discard_pending_stack_adjust (void); + +/* When exiting from function, if safe, clear out any pending stack adjust + so the adjustment won't get done. */ +extern void clear_pending_stack_adjust (void); + +/* Pop any previously-pushed arguments that have not been popped yet. */ +extern void do_pending_stack_adjust (void); + +/* Struct for saving/restoring of pending_stack_adjust/stack_pointer_delta + values. */ + +class saved_pending_stack_adjust +{ +public: + /* Saved value of pending_stack_adjust. */ + poly_int64 x_pending_stack_adjust; + + /* Saved value of stack_pointer_delta. */ + poly_int64 x_stack_pointer_delta; +}; + +/* Remember pending_stack_adjust/stack_pointer_delta. + To be used around code that may call do_pending_stack_adjust (), + but the generated code could be discarded e.g. using delete_insns_since. */ + +extern void save_pending_stack_adjust (saved_pending_stack_adjust *); + +/* Restore the saved pending_stack_adjust/stack_pointer_delta. */ + +extern void restore_pending_stack_adjust (saved_pending_stack_adjust *); + +extern bool split_comparison (enum rtx_code, machine_mode, + enum rtx_code *, enum rtx_code *); + +/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */ +extern void jumpif (tree exp, rtx_code_label *label, profile_probability prob); +extern void jumpif_1 (enum tree_code, tree, tree, rtx_code_label *, + profile_probability); + +/* Generate code to evaluate EXP and jump to LABEL if the value is zero. 
*/ +extern void jumpifnot (tree exp, rtx_code_label *label, + profile_probability prob); +extern void jumpifnot_1 (enum tree_code, tree, tree, rtx_code_label *, + profile_probability); + +extern void do_compare_rtx_and_jump (rtx, rtx, enum rtx_code, int, + machine_mode, rtx, rtx_code_label *, + rtx_code_label *, profile_probability); + +#endif /* GCC_DOJUMP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dominance.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dominance.h new file mode 100644 index 0000000..5dea800 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dominance.h @@ -0,0 +1,94 @@ +/* Calculate (post)dominators header file. + Copyright (C) 2014-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#ifndef GCC_DOMINANCE_H +#define GCC_DOMINANCE_H + +enum cdi_direction +{ + CDI_DOMINATORS = 1, + CDI_POST_DOMINATORS = 2 +}; + +/* State of dominance information. */ + +enum dom_state +{ + DOM_NONE, /* Not computed at all. */ + DOM_NO_FAST_QUERY, /* The data is OK, but the fast query data are not usable. */ + DOM_OK /* Everything is ok. 
*/ +}; + +extern void calculate_dominance_info (enum cdi_direction); +extern void calculate_dominance_info_for_region (enum cdi_direction, + vec); +extern void free_dominance_info (function *, enum cdi_direction); +extern void free_dominance_info (enum cdi_direction); +extern void free_dominance_info_for_region (function *, + enum cdi_direction, + vec); +extern basic_block get_immediate_dominator (enum cdi_direction, basic_block); +extern void set_immediate_dominator (enum cdi_direction, basic_block, + basic_block); +extern auto_vec get_dominated_by (enum cdi_direction, basic_block); +extern auto_vec get_dominated_by_region (enum cdi_direction, + basic_block *, + unsigned); +extern auto_vec get_dominated_to_depth (enum cdi_direction, + basic_block, int); +extern auto_vec get_all_dominated_blocks (enum cdi_direction, + basic_block); +extern void redirect_immediate_dominators (enum cdi_direction, basic_block, + basic_block); +extern basic_block nearest_common_dominator (enum cdi_direction, + basic_block, basic_block); +extern basic_block nearest_common_dominator_for_set (enum cdi_direction, + bitmap); +extern bool dominated_by_p (enum cdi_direction, const_basic_block, + const_basic_block); +unsigned bb_dom_dfs_in (enum cdi_direction, basic_block); +unsigned bb_dom_dfs_out (enum cdi_direction, basic_block); +extern void verify_dominators (enum cdi_direction); + +/* Verify invariants of computed dominance information, if internal consistency + checks are enabled. 
*/ + +static inline void +checking_verify_dominators (cdi_direction dir) +{ + if (flag_checking) + verify_dominators (dir); +} + +basic_block recompute_dominator (enum cdi_direction, basic_block); +extern void iterate_fix_dominators (enum cdi_direction, + vec &, bool); +extern void add_to_dominance_info (enum cdi_direction, basic_block); +extern void delete_from_dominance_info (enum cdi_direction, basic_block); +extern basic_block first_dom_son (enum cdi_direction, basic_block); +extern basic_block next_dom_son (enum cdi_direction, basic_block); +extern enum dom_state dom_info_state (function *, enum cdi_direction); +extern enum dom_state dom_info_state (enum cdi_direction); +extern void set_dom_info_availability (enum cdi_direction, enum dom_state); +extern bool dom_info_available_p (function *, enum cdi_direction); +extern bool dom_info_available_p (enum cdi_direction); + + + +#endif /* GCC_DOMINANCE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/domwalk.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/domwalk.h new file mode 100644 index 0000000..17ddc1b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/domwalk.h @@ -0,0 +1,114 @@ +/* Generic dominator tree walker + Copyright (C) 2003-2022 Free Software Foundation, Inc. + Contributed by Diego Novillo + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_DOM_WALK_H +#define GCC_DOM_WALK_H + +/** + * This is the main class for the dominator walker. It is expected that + * consumers will have a custom class inheriting from it, which will over ride + * at least one of before_dom_children and after_dom_children to implement the + * custom behavior. + */ +class dom_walker +{ +public: + static const edge STOP; + + /* An enum for determining whether the dom walk should be constrained to + blocks reachable by executable edges. */ + + enum reachability + { + /* Walk all blocks within the CFG. */ + ALL_BLOCKS, + + /* Use REACHABLE_BLOCKS when your subclass can discover that some edges + are not executable. + + If a subclass can discover that a COND, SWITCH or GOTO has a static + target in the before_dom_children callback, the taken edge should + be returned. The generic walker will clear EDGE_EXECUTABLE on all + edges it can determine are not executable. + + With REACHABLE_BLOCKS, EDGE_EXECUTABLE will be set on every edge in + the dom_walker ctor; the flag will then be cleared on edges that are + determined to be not executable. */ + REACHABLE_BLOCKS, + + /* Identical to REACHABLE_BLOCKS, but the initial state of EDGE_EXECUTABLE + will instead be preserved in the ctor, allowing for information about + non-executable edges to be merged in from an earlier analysis (and + potentially for additional edges to be marked as non-executable). */ + REACHABLE_BLOCKS_PRESERVING_FLAGS + }; + + /* You can provide a mapping of basic-block index to RPO if you + have that readily available or you do multiple walks. If you + specify NULL as BB_INDEX_TO_RPO dominator children will not be + walked in RPO order. */ + dom_walker (cdi_direction direction, enum reachability = ALL_BLOCKS, + int *bb_index_to_rpo = NULL); + + ~dom_walker (); + + /* Walk the dominator tree. */ + void walk (basic_block); + + /* Function to call before the recursive walk of the dominator children. 
+ + Return value is the always taken edge if the block has multiple outgoing + edges, NULL otherwise. When skipping unreachable blocks, the walker + uses the taken edge information to clear EDGE_EXECUTABLE on the other + edges, exposing unreachable blocks. A NULL return value means all + outgoing edges should still be considered executable. A return value + of STOP means to stop the domwalk from processing dominated blocks from + here. This can be used to process a SEME region only (note domwalk + will still do work linear in function size). */ + virtual edge before_dom_children (basic_block) { return NULL; } + + /* Function to call after the recursive walk of the dominator children. */ + virtual void after_dom_children (basic_block) {} + +private: + /* This is the direction of the dominator tree we want to walk. i.e., + if it is set to CDI_DOMINATORS, then we walk the dominator tree, + if it is set to CDI_POST_DOMINATORS, then we walk the post + dominator tree. */ + const ENUM_BITFIELD (cdi_direction) m_dom_direction : 2; + const ENUM_BITFIELD (reachability) m_reachability : 2; + bool m_user_bb_to_rpo; + basic_block m_unreachable_dom; + int *m_bb_to_rpo; + + /* Query whether or not the given block is reachable or not. */ + bool bb_reachable (struct function *, basic_block); + + /* Given an unreachable block, propagate that property to outgoing + and possibly incoming edges for the block. Typically called after + determining a block is unreachable in the before_dom_children + callback. */ + void propagate_unreachable_to_edges (basic_block, FILE *, dump_flags_t); + +}; + +extern void set_all_edges_as_executable (function *fn); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/double-int.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/double-int.h new file mode 100644 index 0000000..8f86595 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/double-int.h @@ -0,0 +1,470 @@ +/* Operations with long integers. 
+ Copyright (C) 2006-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef DOUBLE_INT_H +#define DOUBLE_INT_H + +/* A large integer is currently represented as a pair of HOST_WIDE_INTs. + It therefore represents a number with precision of + 2 * HOST_BITS_PER_WIDE_INT bits (it is however possible that the + internal representation will change, if numbers with greater precision + are needed, so the users should not rely on it). The representation does + not contain any information about signedness of the represented value, so + it can be used to represent both signed and unsigned numbers. For + operations where the results depend on signedness (division, comparisons), + it must be specified separately. For each such operation, there are three + versions of the function -- double_int_op, that takes an extra UNS argument + giving the signedness of the values, and double_int_sop and double_int_uop + that stand for its specializations for signed and unsigned values. + + You may also represent with numbers in smaller precision using double_int. + You however need to use double_int_ext (that fills in the bits of the + number over the prescribed precision with zeros or with the sign bit) before + operations that do not perform arithmetics modulo 2^precision (comparisons, + division), and possibly before storing the results, if you want to keep + them in some canonical form). 
In general, the signedness of double_int_ext + should match the signedness of the operation. + + ??? The components of double_int differ in signedness mostly for + historical reasons (they replace an older structure used to represent + numbers with precision higher than HOST_WIDE_INT). It might be less + confusing to have them both signed or both unsigned. */ + +struct double_int +{ + /* Normally, we would define constructors to create instances. + Two things prevent us from doing so. + First, defining a constructor makes the class non-POD in C++03, + and we certainly want double_int to be a POD. + Second, the GCC conding conventions prefer explicit conversion, + and explicit conversion operators are not available until C++11. */ + + static double_int from_uhwi (unsigned HOST_WIDE_INT cst); + static double_int from_shwi (HOST_WIDE_INT cst); + static double_int from_pair (HOST_WIDE_INT high, unsigned HOST_WIDE_INT low); + + /* Construct from a fuffer of length LEN. BUFFER will be read according + to byte endianness and word endianness. */ + static double_int from_buffer (const unsigned char *buffer, int len); + + /* No copy assignment operator or destructor to keep the type a POD. */ + + /* There are some special value-creation static member functions. */ + + static double_int mask (unsigned prec); + static double_int max_value (unsigned int prec, bool uns); + static double_int min_value (unsigned int prec, bool uns); + + /* The following functions are mutating operations. */ + + double_int &operator ++ (); // prefix + double_int &operator -- (); // prefix + double_int &operator *= (double_int); + double_int &operator += (double_int); + double_int &operator -= (double_int); + double_int &operator &= (double_int); + double_int &operator ^= (double_int); + double_int &operator |= (double_int); + + /* The following functions are non-mutating operations. */ + + /* Conversion functions. 
*/ + + HOST_WIDE_INT to_shwi () const; + unsigned HOST_WIDE_INT to_uhwi () const; + + /* Conversion query functions. */ + + bool fits_uhwi () const; + bool fits_shwi () const; + bool fits_hwi (bool uns) const; + + /* Attribute query functions. */ + + int trailing_zeros () const; + int popcount () const; + + /* Arithmetic query operations. */ + + bool multiple_of (double_int, bool, double_int *) const; + + /* Arithmetic operation functions. */ + + /* The following operations perform arithmetics modulo 2^precision, so you + do not need to call .ext between them, even if you are representing + numbers with precision less than HOST_BITS_PER_DOUBLE_INT bits. */ + + double_int set_bit (unsigned) const; + double_int mul_with_sign (double_int, bool unsigned_p, bool *overflow) const; + double_int wide_mul_with_sign (double_int, bool unsigned_p, + double_int *higher, bool *overflow) const; + double_int add_with_sign (double_int, bool unsigned_p, bool *overflow) const; + double_int sub_with_overflow (double_int, bool *overflow) const; + double_int neg_with_overflow (bool *overflow) const; + + double_int operator * (double_int) const; + double_int operator + (double_int) const; + double_int operator - (double_int) const; + double_int operator - () const; + double_int operator ~ () const; + double_int operator & (double_int) const; + double_int operator | (double_int) const; + double_int operator ^ (double_int) const; + double_int and_not (double_int) const; + + double_int lshift (HOST_WIDE_INT count) const; + double_int lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const; + double_int rshift (HOST_WIDE_INT count) const; + double_int rshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const; + double_int alshift (HOST_WIDE_INT count, unsigned int prec) const; + double_int arshift (HOST_WIDE_INT count, unsigned int prec) const; + double_int llshift (HOST_WIDE_INT count, unsigned int prec) const; + double_int lrshift (HOST_WIDE_INT count, unsigned int prec) 
const; + double_int lrotate (HOST_WIDE_INT count, unsigned int prec) const; + double_int rrotate (HOST_WIDE_INT count, unsigned int prec) const; + + /* You must ensure that double_int::ext is called on the operands + of the following operations, if the precision of the numbers + is less than HOST_BITS_PER_DOUBLE_INT bits. */ + + double_int div (double_int, bool, unsigned) const; + double_int sdiv (double_int, unsigned) const; + double_int udiv (double_int, unsigned) const; + double_int mod (double_int, bool, unsigned) const; + double_int smod (double_int, unsigned) const; + double_int umod (double_int, unsigned) const; + double_int divmod_with_overflow (double_int, bool, unsigned, + double_int *, bool *) const; + double_int divmod (double_int, bool, unsigned, double_int *) const; + double_int sdivmod (double_int, unsigned, double_int *) const; + double_int udivmod (double_int, unsigned, double_int *) const; + + /* Precision control functions. */ + + double_int ext (unsigned prec, bool uns) const; + double_int zext (unsigned prec) const; + double_int sext (unsigned prec) const; + + /* Comparative functions. */ + + bool is_zero () const; + bool is_one () const; + bool is_minus_one () const; + bool is_negative () const; + + int cmp (double_int b, bool uns) const; + int ucmp (double_int b) const; + int scmp (double_int b) const; + + bool ult (double_int b) const; + bool ule (double_int b) const; + bool ugt (double_int b) const; + bool slt (double_int b) const; + bool sle (double_int b) const; + bool sgt (double_int b) const; + + double_int max (double_int b, bool uns); + double_int smax (double_int b); + double_int umax (double_int b); + + double_int min (double_int b, bool uns); + double_int smin (double_int b); + double_int umin (double_int b); + + bool operator == (double_int cst2) const; + bool operator != (double_int cst2) const; + + /* Please migrate away from using these member variables publicly. 
*/ + + unsigned HOST_WIDE_INT low; + HOST_WIDE_INT high; + +}; + +#define HOST_BITS_PER_DOUBLE_INT (2 * HOST_BITS_PER_WIDE_INT) + +/* Constructors and conversions. */ + +/* Constructs double_int from integer CST. The bits over the precision of + HOST_WIDE_INT are filled with the sign bit. */ + +inline double_int +double_int::from_shwi (HOST_WIDE_INT cst) +{ + double_int r; + r.low = (unsigned HOST_WIDE_INT) cst; + r.high = cst < 0 ? -1 : 0; + return r; +} + +/* Some useful constants. */ +/* FIXME(crowl): Maybe remove after converting callers? + The problem is that a named constant would not be as optimizable, + while the functional syntax is more verbose. */ + +#define double_int_minus_one (double_int::from_shwi (-1)) +#define double_int_zero (double_int::from_shwi (0)) +#define double_int_one (double_int::from_shwi (1)) +#define double_int_two (double_int::from_shwi (2)) +#define double_int_ten (double_int::from_shwi (10)) + +/* Constructs double_int from unsigned integer CST. The bits over the + precision of HOST_WIDE_INT are filled with zeros. */ + +inline double_int +double_int::from_uhwi (unsigned HOST_WIDE_INT cst) +{ + double_int r; + r.low = cst; + r.high = 0; + return r; +} + +inline double_int +double_int::from_pair (HOST_WIDE_INT high, unsigned HOST_WIDE_INT low) +{ + double_int r; + r.low = low; + r.high = high; + return r; +} + +inline double_int & +double_int::operator ++ () +{ + *this += double_int_one; + return *this; +} + +inline double_int & +double_int::operator -- () +{ + *this -= double_int_one; + return *this; +} + +inline double_int & +double_int::operator &= (double_int b) +{ + *this = *this & b; + return *this; +} + +inline double_int & +double_int::operator ^= (double_int b) +{ + *this = *this ^ b; + return *this; +} + +inline double_int & +double_int::operator |= (double_int b) +{ + *this = *this | b; + return *this; +} + +/* Returns value of CST as a signed number. CST must satisfy + double_int::fits_signed. 
*/ + +inline HOST_WIDE_INT +double_int::to_shwi () const +{ + return (HOST_WIDE_INT) low; +} + +/* Returns value of CST as an unsigned number. CST must satisfy + double_int::fits_unsigned. */ + +inline unsigned HOST_WIDE_INT +double_int::to_uhwi () const +{ + return low; +} + +/* Returns true if CST fits in unsigned HOST_WIDE_INT. */ + +inline bool +double_int::fits_uhwi () const +{ + return high == 0; +} + +/* Logical operations. */ + +/* Returns ~A. */ + +inline double_int +double_int::operator ~ () const +{ + double_int result; + result.low = ~low; + result.high = ~high; + return result; +} + +/* Returns A | B. */ + +inline double_int +double_int::operator | (double_int b) const +{ + double_int result; + result.low = low | b.low; + result.high = high | b.high; + return result; +} + +/* Returns A & B. */ + +inline double_int +double_int::operator & (double_int b) const +{ + double_int result; + result.low = low & b.low; + result.high = high & b.high; + return result; +} + +/* Returns A & ~B. */ + +inline double_int +double_int::and_not (double_int b) const +{ + double_int result; + result.low = low & ~b.low; + result.high = high & ~b.high; + return result; +} + +/* Returns A ^ B. */ + +inline double_int +double_int::operator ^ (double_int b) const +{ + double_int result; + result.low = low ^ b.low; + result.high = high ^ b.high; + return result; +} + +void dump_double_int (FILE *, double_int, bool); + +#define ALL_ONES HOST_WIDE_INT_M1U + +/* The operands of the following comparison functions must be processed + with double_int_ext, if their precision is less than + HOST_BITS_PER_DOUBLE_INT bits. */ + +/* Returns true if CST is zero. */ + +inline bool +double_int::is_zero () const +{ + return low == 0 && high == 0; +} + +/* Returns true if CST is one. */ + +inline bool +double_int::is_one () const +{ + return low == 1 && high == 0; +} + +/* Returns true if CST is minus one. 
*/ + +inline bool +double_int::is_minus_one () const +{ + return low == ALL_ONES && high == -1; +} + +/* Returns true if CST is negative. */ + +inline bool +double_int::is_negative () const +{ + return high < 0; +} + +/* Returns true if CST1 == CST2. */ + +inline bool +double_int::operator == (double_int cst2) const +{ + return low == cst2.low && high == cst2.high; +} + +/* Returns true if CST1 != CST2. */ + +inline bool +double_int::operator != (double_int cst2) const +{ + return low != cst2.low || high != cst2.high; +} + +/* Return number of set bits of CST. */ + +inline int +double_int::popcount () const +{ + return popcount_hwi (high) + popcount_hwi (low); +} + + +#ifndef GENERATOR_FILE +/* Conversion to and from GMP integer representations. */ + +void mpz_set_double_int (mpz_t, double_int, bool); +double_int mpz_get_double_int (const_tree, mpz_t, bool); +#endif + +namespace wi +{ + template <> + struct int_traits + { + static const enum precision_type precision_type = CONST_PRECISION; + static const bool host_dependent_precision = true; + static const unsigned int precision = HOST_BITS_PER_DOUBLE_INT; + static unsigned int get_precision (const double_int &); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, + const double_int &); + }; +} + +inline unsigned int +wi::int_traits ::get_precision (const double_int &) +{ + return precision; +} + +inline wi::storage_ref +wi::int_traits ::decompose (HOST_WIDE_INT *scratch, unsigned int p, + const double_int &x) +{ + gcc_checking_assert (precision == p); + scratch[0] = x.low; + if ((x.high == 0 && scratch[0] >= 0) || (x.high == -1 && scratch[0] < 0)) + return wi::storage_ref (scratch, 1, precision); + scratch[1] = x.high; + return wi::storage_ref (scratch, 2, precision); +} + +#endif /* DOUBLE_INT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dump-context.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dump-context.h new file mode 100644 index 0000000..818b26e --- /dev/null +++ 
b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dump-context.h @@ -0,0 +1,305 @@ +/* Support code for handling the various dump_* calls in dumpfile.h + Copyright (C) 2018-2022 Free Software Foundation, Inc. + Contributed by David Malcolm . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +#ifndef GCC_DUMP_CONTEXT_H +#define GCC_DUMP_CONTEXT_H 1 + +#include "dumpfile.h" +#include "pretty-print.h" +#include "selftest.h" +#include "optinfo.h" + +class optrecord_json_writer; +namespace selftest { class temp_dump_context; } +class debug_dump_context; + +/* A class for handling the various dump_* calls. + + In particular, this class has responsibility for consolidating + the "dump_*" calls into optinfo instances (delimited by "dump_*_loc" + calls), and emitting them. + + Putting this in a class (rather than as global state) allows + for selftesting of this code. 
*/ + +class dump_context +{ + friend class selftest::temp_dump_context; + friend class debug_dump_context; + + public: + static dump_context &get () { return *s_current; } + + ~dump_context (); + + void refresh_dumps_are_enabled (); + + void dump_loc (const dump_metadata_t &metadata, + const dump_user_location_t &loc); + void dump_loc_immediate (dump_flags_t dump_kind, + const dump_user_location_t &loc); + + void dump_gimple_stmt (const dump_metadata_t &metadata, + dump_flags_t extra_dump_flags, + gimple *gs, int spc); + + void dump_gimple_stmt_loc (const dump_metadata_t &metadata, + const dump_user_location_t &loc, + dump_flags_t extra_dump_flags, + gimple *gs, int spc); + + void dump_gimple_expr (const dump_metadata_t &metadata, + dump_flags_t extra_dump_flags, + gimple *gs, int spc); + + void dump_gimple_expr_loc (const dump_metadata_t &metadata, + const dump_user_location_t &loc, + dump_flags_t extra_dump_flags, + gimple *gs, + int spc); + + void dump_generic_expr (const dump_metadata_t &metadata, + dump_flags_t extra_dump_flags, + tree t); + + void dump_generic_expr_loc (const dump_metadata_t &metadata, + const dump_user_location_t &loc, + dump_flags_t extra_dump_flags, + tree t); + + void dump_printf_va (const dump_metadata_t &metadata, const char *format, + va_list *ap) ATTRIBUTE_GCC_DUMP_PRINTF (3, 0); + + void dump_printf_loc_va (const dump_metadata_t &metadata, + const dump_user_location_t &loc, + const char *format, va_list *ap) + ATTRIBUTE_GCC_DUMP_PRINTF (4, 0); + + template + void dump_dec (const dump_metadata_t &metadata, const poly_int &value); + + void dump_symtab_node (const dump_metadata_t &metadata, symtab_node *node); + + /* Managing nested scopes. */ + unsigned int get_scope_depth () const; + void begin_scope (const char *name, + const dump_user_location_t &user_location, + const dump_impl_location_t &impl_location); + void end_scope (); + + /* Should optinfo instances be created? + All creation of optinfos should be guarded by this predicate. 
+ Return true if any optinfo destinations are active. */ + bool optinfo_enabled_p () const; + + bool optimization_records_enabled_p () const + { + return m_json_writer != NULL; + } + void set_json_writer (optrecord_json_writer *writer); + void finish_any_json_writer (); + + void end_any_optinfo (); + + void emit_optinfo (const optinfo *info); + void emit_item (optinfo_item *item, dump_flags_t dump_kind); + + bool apply_dump_filter_p (dump_flags_t dump_kind, dump_flags_t filter) const; + + private: + optinfo &ensure_pending_optinfo (const dump_metadata_t &metadata); + optinfo &begin_next_optinfo (const dump_metadata_t &metadata, + const dump_user_location_t &loc); + + /* The current nesting depth of dump scopes, for showing nesting + via indentation). */ + unsigned int m_scope_depth; + + /* The optinfo currently being accumulated since the last dump_*_loc call, + if any. */ + optinfo *m_pending; + + /* If -fsave-optimization-record is enabled, the heap-allocated JSON writer + instance, otherwise NULL. */ + optrecord_json_writer *m_json_writer; + + /* For use in selftests: if non-NULL, then items are to be printed + to this, using the given flags. */ + pretty_printer *m_test_pp; + dump_flags_t m_test_pp_flags; + + /* The currently active dump_context, for use by the dump_* API calls. */ + static dump_context *s_current; + + /* The default active context. */ + static dump_context s_default; +}; + +/* A subclass of pretty_printer for implementing dump_context::dump_printf_va. + In particular, the formatted chunks are captured as optinfo_item instances, + thus retaining metadata about the entities being dumped (e.g. source + locations), rather than just as plain text. */ + +class dump_pretty_printer : public pretty_printer +{ +public: + dump_pretty_printer (dump_context *context, dump_flags_t dump_kind); + + void emit_items (optinfo *dest); + +private: + /* Information on an optinfo_item that was generated during phase 2 of + formatting. 
*/ + class stashed_item + { + public: + stashed_item (const char **buffer_ptr_, optinfo_item *item_) + : buffer_ptr (buffer_ptr_), item (item_) {} + const char **buffer_ptr; + optinfo_item *item; + }; + + static bool format_decoder_cb (pretty_printer *pp, text_info *text, + const char *spec, int /*precision*/, + bool /*wide*/, bool /*set_locus*/, + bool /*verbose*/, bool */*quoted*/, + const char **buffer_ptr); + + bool decode_format (text_info *text, const char *spec, + const char **buffer_ptr); + + void stash_item (const char **buffer_ptr, optinfo_item *item); + + void emit_any_pending_textual_chunks (optinfo *dest); + + void emit_item (optinfo_item *item, optinfo *dest); + + dump_context *m_context; + dump_flags_t m_dump_kind; + auto_vec m_stashed_items; +}; + +/* An RAII-style class for use in debug dumpers for temporarily using a + different dump_context. It enables full details and outputs to + stderr instead of the currently active dump_file. */ + +class debug_dump_context +{ + public: + debug_dump_context (FILE *f = stderr); + ~debug_dump_context (); + + private: + dump_context m_context; + dump_context *m_saved; + dump_flags_t m_saved_flags; + dump_flags_t m_saved_pflags; + FILE *m_saved_file; +}; + + +#if CHECKING_P + +namespace selftest { + +/* An RAII-style class for use in selftests for temporarily using a different + dump_context. */ + +class temp_dump_context +{ + public: + temp_dump_context (bool forcibly_enable_optinfo, + bool forcibly_enable_dumping, + dump_flags_t test_pp_flags); + ~temp_dump_context (); + + /* Support for selftests. */ + optinfo *get_pending_optinfo () const { return m_context.m_pending; } + const char *get_dumped_text (); + + private: + pretty_printer m_pp; + dump_context m_context; + dump_context *m_saved; +}; + +/* Implementation detail of ASSERT_DUMPED_TEXT_EQ. 
*/ + +extern void verify_dumped_text (const location &loc, + temp_dump_context *context, + const char *expected_text); + +/* Verify that the text dumped so far in CONTEXT equals + EXPECTED_TEXT. + As a side-effect, the internal buffer is 0-terminated. */ + +#define ASSERT_DUMPED_TEXT_EQ(CONTEXT, EXPECTED_TEXT) \ + SELFTEST_BEGIN_STMT \ + verify_dumped_text (SELFTEST_LOCATION, &(CONTEXT), (EXPECTED_TEXT)); \ + SELFTEST_END_STMT + + +/* Verify that ITEM has the expected values. */ + +void +verify_item (const location &loc, + const optinfo_item *item, + enum optinfo_item_kind expected_kind, + location_t expected_location, + const char *expected_text); + +/* Verify that ITEM is a text item, with EXPECTED_TEXT. */ + +#define ASSERT_IS_TEXT(ITEM, EXPECTED_TEXT) \ + SELFTEST_BEGIN_STMT \ + verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_TEXT, \ + UNKNOWN_LOCATION, (EXPECTED_TEXT)); \ + SELFTEST_END_STMT + +/* Verify that ITEM is a tree item, with the expected values. */ + +#define ASSERT_IS_TREE(ITEM, EXPECTED_LOCATION, EXPECTED_TEXT) \ + SELFTEST_BEGIN_STMT \ + verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_TREE, \ + (EXPECTED_LOCATION), (EXPECTED_TEXT)); \ + SELFTEST_END_STMT + +/* Verify that ITEM is a gimple item, with the expected values. */ + +#define ASSERT_IS_GIMPLE(ITEM, EXPECTED_LOCATION, EXPECTED_TEXT) \ + SELFTEST_BEGIN_STMT \ + verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_GIMPLE, \ + (EXPECTED_LOCATION), (EXPECTED_TEXT)); \ + SELFTEST_END_STMT + +/* Verify that ITEM is a symtab node, with the expected values. 
*/ + +#define ASSERT_IS_SYMTAB_NODE(ITEM, EXPECTED_LOCATION, EXPECTED_TEXT) \ + SELFTEST_BEGIN_STMT \ + verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_SYMTAB_NODE, \ + (EXPECTED_LOCATION), (EXPECTED_TEXT)); \ + SELFTEST_END_STMT + +} // namespace selftest + +#endif /* CHECKING_P */ + +#endif /* GCC_DUMP_CONTEXT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dumpfile.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dumpfile.h new file mode 100644 index 0000000..3c47f09 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dumpfile.h @@ -0,0 +1,774 @@ +/* Definitions for the shared dumpfile. + Copyright (C) 2004-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +#ifndef GCC_DUMPFILE_H +#define GCC_DUMPFILE_H 1 + +#include "profile-count.h" + +/* An attribute for annotating formatting printing functions that use + the dumpfile/optinfo formatting codes. These are the pretty_printer + format codes (see pretty-print.cc), with additional codes for middle-end + specific entities (see dumpfile.cc). */ + +#if GCC_VERSION >= 9000 +#define ATTRIBUTE_GCC_DUMP_PRINTF(m, n) \ + __attribute__ ((__format__ (__gcc_dump_printf__, m ,n))) \ + ATTRIBUTE_NONNULL(m) +#else +#define ATTRIBUTE_GCC_DUMP_PRINTF(m, n) ATTRIBUTE_NONNULL(m) +#endif + +/* Different tree dump places. When you add new tree dump places, + extend the DUMP_FILES array in dumpfile.cc. 
*/ +enum tree_dump_index +{ + TDI_none, /* No dump */ + TDI_cgraph, /* dump function call graph. */ + TDI_inheritance, /* dump type inheritance graph. */ + TDI_clones, /* dump IPA cloning decisions. */ + TDI_original, /* dump each function before optimizing it */ + TDI_gimple, /* dump each function after gimplifying it */ + TDI_nested, /* dump each function after unnesting it */ + TDI_lto_stream_out, /* dump information about lto streaming */ + TDI_profile_report, /* dump information about profile quality */ + + TDI_lang_all, /* enable all the language dumps. */ + TDI_tree_all, /* enable all the GENERIC/GIMPLE dumps. */ + TDI_rtl_all, /* enable all the RTL dumps. */ + TDI_ipa_all, /* enable all the IPA dumps. */ + + TDI_end +}; + +/* Enum used to distinguish dump files to types. */ + +enum dump_kind +{ + DK_none, + DK_lang, + DK_tree, + DK_rtl, + DK_ipa +}; + +/* Bit masks to control dumping. Not all values are applicable to all + dumps. Add new ones at the end. When you define new values, extend + the DUMP_OPTIONS array in dumpfile.cc. The TDF_* flags coexist with + MSG_* flags (for -fopt-info) and the bit values must be chosen to + allow that. */ +enum dump_flag : uint32_t +{ + /* Value of TDF_NONE is used just for bits filtered by TDF_KIND_MASK. */ + TDF_NONE = 0, + + /* Dump node addresses. */ + TDF_ADDRESS = (1 << 0), + + /* Don't go wild following links. */ + TDF_SLIM = (1 << 1), + + /* Don't unparse the function. */ + TDF_RAW = (1 << 2), + + /* Show more detailed info about each pass. */ + TDF_DETAILS = (1 << 3), + + /* Dump various statistics about each pass. */ + TDF_STATS = (1 << 4), + + /* Display basic block boundaries. */ + TDF_BLOCKS = (1 << 5), + + /* Display virtual operands. */ + TDF_VOPS = (1 << 6), + + /* Display statement line numbers. */ + TDF_LINENO = (1 << 7), + + /* Display decl UIDs. */ + TDF_UID = (1 << 8), + + /* Address of stmt. */ + TDF_STMTADDR = (1 << 9), + + /* A graph dump is being emitted. 
*/ + TDF_GRAPH = (1 << 10), + + /* Display memory symbols in expr. + Implies TDF_VOPS. */ + TDF_MEMSYMS = (1 << 11), + + /* A flag to only print the RHS of a gimple stmt. */ + TDF_RHS_ONLY = (1 << 12), + + /* Display asm names of decls. */ + TDF_ASMNAME = (1 << 13), + + /* Display EH region number holding this gimple statement. */ + TDF_EH = (1 << 14), + + /* Omit UIDs from dumps. */ + TDF_NOUID = (1 << 15), + + /* Display alias information. */ + TDF_ALIAS = (1 << 16), + + /* Enumerate locals by uid. */ + TDF_ENUMERATE_LOCALS = (1 << 17), + + /* Dump cselib details. */ + TDF_CSELIB = (1 << 18), + + /* Dump SCEV details. */ + TDF_SCEV = (1 << 19), + + /* Dump in GIMPLE FE syntax. */ + TDF_GIMPLE = (1 << 20), + + /* Dump folding details. */ + TDF_FOLDING = (1 << 21), + + /* MSG_* flags for expressing the kinds of message to + be emitted by -fopt-info. */ + + /* -fopt-info optimized sources. */ + MSG_OPTIMIZED_LOCATIONS = (1 << 22), + + /* Missed opportunities. */ + MSG_MISSED_OPTIMIZATION = (1 << 23), + + /* General optimization info. */ + MSG_NOTE = (1 << 24), + + /* Mask for selecting MSG_-kind flags. */ + MSG_ALL_KINDS = (MSG_OPTIMIZED_LOCATIONS + | MSG_MISSED_OPTIMIZATION + | MSG_NOTE), + + /* MSG_PRIORITY_* flags for expressing the priority levels of message + to be emitted by -fopt-info, and filtering on them. + By default, messages at the top-level dump scope are "user-facing", + whereas those that are in nested scopes are implicitly "internals". + This behavior can be overridden for a given dump message by explicitly + specifying one of the MSG_PRIORITY_* flags. + + By default, dump files show both kinds of message, whereas -fopt-info + only shows "user-facing" messages, and requires the "-internals" + sub-option of -fopt-info to show the internal messages. */ + + /* Implicitly supplied for messages at the top-level dump scope. */ + MSG_PRIORITY_USER_FACING = (1 << 25), + + /* Implicitly supplied for messages within nested dump scopes. 
*/ + MSG_PRIORITY_INTERNALS = (1 << 26), + + /* Supplied when an opt_problem generated in a nested scope is re-emitted + at the top-level. We want to default to showing these in -fopt-info + output, but to *not* show them in dump files, as the message would be + shown twice, messing up "scan-tree-dump-times" in DejaGnu tests. */ + MSG_PRIORITY_REEMITTED = (1 << 27), + + /* Mask for selecting MSG_PRIORITY_* flags. */ + MSG_ALL_PRIORITIES = (MSG_PRIORITY_USER_FACING + | MSG_PRIORITY_INTERNALS + | MSG_PRIORITY_REEMITTED), + + /* All -fdump- flags. */ + TDF_ALL_VALUES = (1 << 28) - 1, + + /* Dumping for -fcompare-debug. */ + TDF_COMPARE_DEBUG = (1 << 28), + + /* Dump a GIMPLE value which means wrapping certain things with _Literal. */ + TDF_GIMPLE_VAL = (1 << 29), + + /* For error. */ + TDF_ERROR = ((uint32_t)1 << 30), +}; + +/* Dump flags type. */ + +typedef enum dump_flag dump_flags_t; + +static inline dump_flags_t +operator| (dump_flags_t lhs, dump_flags_t rhs) +{ + return (dump_flags_t)((std::underlying_type::type)lhs + | (std::underlying_type::type)rhs); +} + +static inline dump_flags_t +operator& (dump_flags_t lhs, dump_flags_t rhs) +{ + return (dump_flags_t)((std::underlying_type::type)lhs + & (std::underlying_type::type)rhs); +} + +static inline dump_flags_t +operator~ (dump_flags_t flags) +{ + return (dump_flags_t)~((std::underlying_type::type)flags); +} + +static inline dump_flags_t & +operator|= (dump_flags_t &lhs, dump_flags_t rhs) +{ + lhs = (dump_flags_t)((std::underlying_type::type)lhs + | (std::underlying_type::type)rhs); + return lhs; +} + +static inline dump_flags_t & +operator&= (dump_flags_t &lhs, dump_flags_t rhs) +{ + lhs = (dump_flags_t)((std::underlying_type::type)lhs + & (std::underlying_type::type)rhs); + return lhs; +} + +/* Flags to control high-level -fopt-info dumps. Usually these flags + define a group of passes. An optimization pass can be part of + multiple groups. 
*/ + +enum optgroup_flag +{ + OPTGROUP_NONE = 0, + + /* IPA optimization passes */ + OPTGROUP_IPA = (1 << 1), + + /* Loop optimization passes */ + OPTGROUP_LOOP = (1 << 2), + + /* Inlining passes */ + OPTGROUP_INLINE = (1 << 3), + + /* OMP (Offloading and Multi Processing) transformations */ + OPTGROUP_OMP = (1 << 4), + + /* Vectorization passes */ + OPTGROUP_VEC = (1 << 5), + + /* All other passes */ + OPTGROUP_OTHER = (1 << 6), + + OPTGROUP_ALL = (OPTGROUP_IPA | OPTGROUP_LOOP | OPTGROUP_INLINE + | OPTGROUP_OMP | OPTGROUP_VEC | OPTGROUP_OTHER) +}; + +typedef enum optgroup_flag optgroup_flags_t; + +static inline optgroup_flags_t +operator| (optgroup_flags_t lhs, optgroup_flags_t rhs) +{ + return (optgroup_flags_t)((std::underlying_type::type)lhs + | (std::underlying_type::type)rhs); +} + +static inline optgroup_flags_t & +operator|= (optgroup_flags_t &lhs, optgroup_flags_t rhs) +{ + lhs = (optgroup_flags_t)((std::underlying_type::type)lhs + | (std::underlying_type::type)rhs); + return lhs; +} + +/* Define a tree dump switch. */ +struct dump_file_info +{ + /* Suffix to give output file. */ + const char *suffix; + /* Command line dump switch. */ + const char *swtch; + /* Command line glob. */ + const char *glob; + /* Filename for the pass-specific stream. */ + const char *pfilename; + /* Filename for the -fopt-info stream. */ + const char *alt_filename; + /* Pass-specific dump stream. */ + FILE *pstream; + /* -fopt-info stream. */ + FILE *alt_stream; + /* Dump kind. */ + dump_kind dkind; + /* Dump flags. */ + dump_flags_t pflags; + /* A pass flags for -fopt-info. */ + dump_flags_t alt_flags; + /* Flags for -fopt-info given by a user. */ + optgroup_flags_t optgroup_flags; + /* State of pass-specific stream. */ + int pstate; + /* State of the -fopt-info stream. */ + int alt_state; + /* Dump file number. */ + int num; + /* Fields "suffix", "swtch", "glob" can be const strings, + or can be dynamically allocated, needing free. 
*/ + bool owns_strings; + /* When a given dump file is being initialized, this flag is set to true + if the corresponding TDF_graph dump file has also been initialized. */ + bool graph_dump_initialized; +}; + +/* A class for describing where in the user's source that a dump message + relates to, with various constructors for convenience. + In particular, this lets us associate dump messages + with hotness information (e.g. from PGO), allowing them to + be prioritized by code hotness. */ + +class dump_user_location_t +{ + public: + /* Default constructor, analogous to UNKNOWN_LOCATION. */ + dump_user_location_t () : m_count (), m_loc (UNKNOWN_LOCATION) {} + + /* Construct from a gimple statement (using its location and hotness). */ + dump_user_location_t (const gimple *stmt); + + /* Construct from an RTL instruction (using its location and hotness). */ + dump_user_location_t (const rtx_insn *insn); + + /* Construct from a location_t. This one is deprecated (since it doesn't + capture hotness information); it thus needs to be spelled out. */ + static dump_user_location_t + from_location_t (location_t loc) + { + return dump_user_location_t (profile_count (), loc); + } + + /* Construct from a function declaration. This one requires spelling out + to avoid accidentally constructing from other kinds of tree. */ + static dump_user_location_t + from_function_decl (tree fndecl); + + profile_count get_count () const { return m_count; } + location_t get_location_t () const { return m_loc; } + + private: + /* Private ctor from count and location, for use by from_location_t. */ + dump_user_location_t (profile_count count, location_t loc) + : m_count (count), m_loc (loc) + {} + + profile_count m_count; + location_t m_loc; +}; + +/* A class for identifying where in the compiler's own source + (or a plugin) that a dump message is being emitted from. 
*/ + +class dump_impl_location_t +{ +public: + dump_impl_location_t ( +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + const char *file = __builtin_FILE (), + int line = __builtin_LINE (), + const char *function = __builtin_FUNCTION () +#else + const char *file = __FILE__, + int line = __LINE__, + const char *function = NULL +#endif + ) + : m_file (file), m_line (line), m_function (function) + {} + + const char *m_file; + int m_line; + const char *m_function; +}; + +/* A bundle of metadata for describing a dump message: + (a) the dump_flags + (b) the source location within the compiler/plugin. + + The constructors use default parameters so that (b) gets sets up + automatically. + + Hence you can pass in e.g. MSG_NOTE, and the dump call + will automatically record where in GCC's source code the + dump was emitted from. */ + +class dump_metadata_t +{ + public: + dump_metadata_t (dump_flags_t dump_flags, + const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + : m_dump_flags (dump_flags), + m_impl_location (impl_location) + { + } + + dump_flags_t get_dump_flags () const { return m_dump_flags; } + + const dump_impl_location_t & + get_impl_location () const { return m_impl_location; } + + private: + dump_flags_t m_dump_flags; + dump_impl_location_t m_impl_location; +}; + +/* A bundle of information for describing the location of a dump message: + (a) the source location and hotness within the user's code, together with + (b) the source location within the compiler/plugin. + + The constructors use default parameters so that (b) gets sets up + automatically. + + The upshot is that you can pass in e.g. a gimple * to dump_printf_loc, + and the dump call will automatically record where in GCC's source + code the dump was emitted from. */ + +class dump_location_t +{ + public: + /* Default constructor, analogous to UNKNOWN_LOCATION. 
*/ + dump_location_t (const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + : m_user_location (dump_user_location_t ()), + m_impl_location (impl_location) + { + } + + /* Construct from a gimple statement (using its location and hotness). */ + dump_location_t (const gimple *stmt, + const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + : m_user_location (dump_user_location_t (stmt)), + m_impl_location (impl_location) + { + } + + /* Construct from an RTL instruction (using its location and hotness). */ + dump_location_t (const rtx_insn *insn, + const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + : m_user_location (dump_user_location_t (insn)), + m_impl_location (impl_location) + { + } + + /* Construct from a dump_user_location_t. */ + dump_location_t (const dump_user_location_t &user_location, + const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + : m_user_location (user_location), + m_impl_location (impl_location) + { + } + + /* Construct from a location_t. This one is deprecated (since it doesn't + capture hotness information), and thus requires spelling out. 
*/ + static dump_location_t + from_location_t (location_t loc, + const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + { + return dump_location_t (dump_user_location_t::from_location_t (loc), + impl_location); + } + + const dump_user_location_t & + get_user_location () const { return m_user_location; } + + const dump_impl_location_t & + get_impl_location () const { return m_impl_location; } + + location_t get_location_t () const + { + return m_user_location.get_location_t (); + } + + profile_count get_count () const { return m_user_location.get_count (); } + + private: + dump_user_location_t m_user_location; + dump_impl_location_t m_impl_location; +}; + +/* In dumpfile.cc */ +extern FILE *dump_begin (int, dump_flags_t *, int part=-1); +extern void dump_end (int, FILE *); +extern int opt_info_switch_p (const char *); +extern const char *dump_flag_name (int); +extern const kv_pair optgroup_options[]; +extern dump_flags_t +parse_dump_option (const char *, const char **); + +/* Global variables used to communicate with passes. */ +extern FILE *dump_file; +extern dump_flags_t dump_flags; +extern const char *dump_file_name; + +extern bool dumps_are_enabled; + +extern void set_dump_file (FILE *new_dump_file); + +/* Return true if any of the dumps is enabled, false otherwise. */ +static inline bool +dump_enabled_p (void) +{ + return dumps_are_enabled; +} + +/* The following API calls (which *don't* take a "FILE *") + write the output to zero or more locations. + + Some destinations are written to immediately as dump_* calls + are made; for others, the output is consolidated into an "optinfo" + instance (with its own metadata), and only emitted once the optinfo + is complete. 
+ + The destinations are: + + (a) the "immediate" destinations: + (a.1) the active dump_file, if any + (a.2) the -fopt-info destination, if any + (b) the "optinfo" destinations, if any: + (b.1) as optimization records + + dump_* (MSG_*) --> dumpfile.cc --> items --> (a.1) dump_file + | `-> (a.2) alt_dump_file + | + `--> (b) optinfo + `---> optinfo destinations + (b.1) optimization records + + For optinfos, the dump_*_loc mark the beginning of an optinfo + instance: all subsequent dump_* calls are consolidated into + that optinfo, until the next dump_*_loc call (or a change in + dump scope, or a call to dumpfile_ensure_any_optinfo_are_flushed). + + A group of dump_* calls should be guarded by: + + if (dump_enabled_p ()) + + to minimize the work done for the common case where dumps + are disabled. */ + +extern void dump_printf (const dump_metadata_t &, const char *, ...) + ATTRIBUTE_GCC_DUMP_PRINTF (2, 3); + +extern void dump_printf_loc (const dump_metadata_t &, const dump_user_location_t &, + const char *, ...) 
+ ATTRIBUTE_GCC_DUMP_PRINTF (3, 0); +extern void dump_function (int phase, tree fn); +extern void dump_basic_block (dump_flags_t, basic_block, int); +extern void dump_generic_expr_loc (const dump_metadata_t &, + const dump_user_location_t &, + dump_flags_t, tree); +extern void dump_generic_expr (const dump_metadata_t &, dump_flags_t, tree); +extern void dump_gimple_stmt_loc (const dump_metadata_t &, + const dump_user_location_t &, + dump_flags_t, gimple *, int); +extern void dump_gimple_stmt (const dump_metadata_t &, dump_flags_t, gimple *, int); +extern void dump_gimple_expr_loc (const dump_metadata_t &, + const dump_user_location_t &, + dump_flags_t, gimple *, int); +extern void dump_gimple_expr (const dump_metadata_t &, dump_flags_t, gimple *, int); +extern void dump_symtab_node (const dump_metadata_t &, symtab_node *); + +template +void dump_dec (const dump_metadata_t &, const poly_int &); +extern void dump_dec (dump_flags_t, const poly_wide_int &, signop); +extern void dump_hex (dump_flags_t, const poly_wide_int &); + +extern void dumpfile_ensure_any_optinfo_are_flushed (); + +/* Managing nested scopes, so that dumps can express the call chain + leading to a dump message. */ + +extern unsigned int get_dump_scope_depth (); +extern void dump_begin_scope (const char *name, + const dump_user_location_t &user_location, + const dump_impl_location_t &impl_location); +extern void dump_end_scope (); + +/* Implementation detail of the AUTO_DUMP_SCOPE macro below. + + A RAII-style class intended to make it easy to emit dump + information about entering and exiting a collection of nested + function calls. 
*/ + +class auto_dump_scope +{ + public: + auto_dump_scope (const char *name, + const dump_user_location_t &user_location, + const dump_impl_location_t &impl_location + = dump_impl_location_t ()) + { + if (dump_enabled_p ()) + dump_begin_scope (name, user_location, impl_location); + } + ~auto_dump_scope () + { + if (dump_enabled_p ()) + dump_end_scope (); + } +}; + +/* A macro for calling: + dump_begin_scope (NAME, USER_LOC); + via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc, + and then calling + dump_end_scope (); + once the object goes out of scope, thus capturing the nesting of + the scopes. + + These scopes affect dump messages within them: dump messages at the + top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those + in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */ + +#define AUTO_DUMP_SCOPE(NAME, USER_LOC) \ + auto_dump_scope scope (NAME, USER_LOC) + +extern void dump_function (int phase, tree fn); +extern void print_combine_total_stats (void); +extern bool enable_rtl_dump_file (void); + +/* In tree-dump.cc */ +extern void dump_node (const_tree, dump_flags_t, FILE *); + +/* In combine.cc */ +extern void dump_combine_total_stats (FILE *); +/* In cfghooks.cc */ +extern void dump_bb (FILE *, basic_block, int, dump_flags_t); + +class opt_pass; + +namespace gcc { + +/* A class for managing all of the various dump files used by the + optimization passes. */ + +class dump_manager +{ +public: + + dump_manager (); + ~dump_manager (); + + /* Register a dumpfile. + + TAKE_OWNERSHIP determines whether callee takes ownership of strings + SUFFIX, SWTCH, and GLOB. */ + unsigned int + dump_register (const char *suffix, const char *swtch, const char *glob, + dump_kind dkind, optgroup_flags_t optgroup_flags, + bool take_ownership); + + /* Allow languages and middle-end to register their dumps before the + optimization passes. */ + void + register_dumps (); + + /* Return the dump_file_info for the given phase. 
*/ + struct dump_file_info * + get_dump_file_info (int phase) const; + + struct dump_file_info * + get_dump_file_info_by_switch (const char *swtch) const; + + /* Return the name of the dump file for the given phase. + If the dump is not enabled, returns NULL. */ + char * + get_dump_file_name (int phase, int part = -1) const; + + char * + get_dump_file_name (struct dump_file_info *dfi, int part = -1) const; + + void + dump_switch_p (const char *arg); + + /* Start a dump for PHASE. Store user-supplied dump flags in + *FLAG_PTR. Return the number of streams opened. Set globals + DUMP_FILE, and ALT_DUMP_FILE to point to the opened streams, and + set dump_flags appropriately for both pass dump stream and + -fopt-info stream. */ + int + dump_start (int phase, dump_flags_t *flag_ptr); + + /* Finish a tree dump for PHASE and close associated dump streams. Also + reset the globals DUMP_FILE, ALT_DUMP_FILE, and DUMP_FLAGS. */ + void + dump_finish (int phase); + + FILE * + dump_begin (int phase, dump_flags_t *flag_ptr, int part); + + /* Returns nonzero if tree dump PHASE has been initialized. */ + int + dump_initialized_p (int phase) const; + + /* Returns the switch name of PHASE. */ + const char * + dump_flag_name (int phase) const; + + void register_pass (opt_pass *pass); + +private: + + int + dump_phase_enabled_p (int phase) const; + + int + dump_switch_p_1 (const char *arg, struct dump_file_info *dfi, bool doglob); + + int + dump_enable_all (dump_kind dkind, dump_flags_t flags, const char *filename); + + int + opt_info_enable_passes (optgroup_flags_t optgroup_flags, dump_flags_t flags, + const char *filename); + + bool update_dfi_for_opt_info (dump_file_info *dfi) const; + +private: + + /* Dynamically registered dump files and switches. 
*/ + int m_next_dump; + struct dump_file_info *m_extra_dump_files; + size_t m_extra_dump_files_in_use; + size_t m_extra_dump_files_alloced; + + /* Stored values from -fopt-info, for handling passes created after + option-parsing (by backends and by plugins). */ + optgroup_flags_t m_optgroup_flags; + dump_flags_t m_optinfo_flags; + char *m_optinfo_filename; + + /* Grant access to dump_enable_all. */ + friend bool ::enable_rtl_dump_file (void); + + /* Grant access to opt_info_enable_passes. */ + friend int ::opt_info_switch_p (const char *arg); + +}; // class dump_manager + +} // namespace gcc + +#endif /* GCC_DUMPFILE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2asm.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2asm.h new file mode 100644 index 0000000..871a77b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2asm.h @@ -0,0 +1,100 @@ +/* Dwarf2 assembler output helper routines. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DWARF2ASM_H +#define GCC_DWARF2ASM_H + +extern void dw2_assemble_integer (int, rtx); + +extern void dw2_asm_output_data_raw (int, unsigned HOST_WIDE_INT); + +extern void dw2_asm_output_data (int, unsigned HOST_WIDE_INT, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_3; + +extern void dw2_asm_output_delta (int, const char *, const char *, + const char *, ...) 
+ ATTRIBUTE_NULL_PRINTF_4; + +extern void dw2_asm_output_vms_delta (int, const char *, const char *, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_4; + +extern void dw2_asm_output_offset (int, const char *, section *, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_4; + +extern void dw2_asm_output_offset (int, const char *, HOST_WIDE_INT, + section *, const char *, ...) + ATTRIBUTE_NULL_PRINTF_5; + +extern void dw2_asm_output_addr (int, const char *, const char *, ...) + ATTRIBUTE_NULL_PRINTF_3; + +extern void dw2_asm_output_addr_rtx (int, rtx, const char *, ...) + ATTRIBUTE_NULL_PRINTF_3; + +extern void dw2_asm_output_encoded_addr_rtx (int, rtx, bool, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_4; + +extern void dw2_asm_output_nstring (const char *, size_t, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_3; + +extern void dw2_asm_output_data_uleb128_raw (unsigned HOST_WIDE_INT); + +extern void dw2_asm_output_data_uleb128 (unsigned HOST_WIDE_INT, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_2; + +extern void dw2_asm_output_data_sleb128_raw (HOST_WIDE_INT); + +extern void dw2_asm_output_data_sleb128 (HOST_WIDE_INT, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_2; + +extern void dw2_asm_output_symname_uleb128 (const char *, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_2; + +extern void dw2_asm_output_delta_uleb128 (const char *, const char *, + const char *, ...) + ATTRIBUTE_NULL_PRINTF_3; + +extern int size_of_uleb128 (unsigned HOST_WIDE_INT); +extern int size_of_sleb128 (HOST_WIDE_INT); +extern int size_of_encoded_value (int); +extern const char *eh_data_format_name (int); + +extern rtx dw2_force_const_mem (rtx, bool); +extern void dw2_output_indirect_constants (void); + +/* These are currently unused. */ + +#if 0 +extern void dw2_asm_output_pcrel (int, const char *, const char *, ...) + ATTRIBUTE_NULL_PRINTF_3; + +extern void dw2_asm_output_delta_sleb128 (const char *, const char *, + const char *, ...) 
+ ATTRIBUTE_NULL_PRINTF_3; +#endif + +#endif /* GCC_DWARF2ASM_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2ctf.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2ctf.h new file mode 100644 index 0000000..ae917ac --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2ctf.h @@ -0,0 +1,55 @@ +/* dwarf2ctf.h - DWARF interface for CTF/BTF generation. + Copyright (C) 2021-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* This file contains declarations and prototypes to define an interface + between DWARF and CTF/BTF generation. */ + +#ifndef GCC_DWARF2CTF_H +#define GCC_DWARF2CTF_H 1 + +#include "dwarf2out.h" +#include "flags.h" + +/* Debug Format Interface. Used in dwarf2out.cc. */ + +extern void ctf_debug_init (void); +extern void ctf_debug_init_postprocess (bool); +extern bool ctf_do_die (dw_die_ref); +extern void ctf_debug_early_finish (const char *); +extern void ctf_debug_finish (const char *); + +/* Wrappers for CTF/BTF to fetch information from GCC DWARF DIE. Used in + ctfc.cc. + + A CTF container does not store all debug information internally. Some of + the info is fetched indirectly via the DIE reference available in each CTF + container entry. + + These functions will be used by the CTF container to give access to its + consumers (CTF/BTF) to various debug information available in DWARF DIE. 
+ Direct access to debug information in GCC dwarf structures by the consumers + of CTF/BTF information is not ideal. */ + +/* Source location information. */ + +extern const char * ctf_get_die_loc_file (dw_die_ref); +extern unsigned int ctf_get_die_loc_line (dw_die_ref); +extern unsigned int ctf_get_die_loc_col (dw_die_ref); + +#endif /* GCC_DWARF2CTF_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2out.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2out.h new file mode 100644 index 0000000..656ef94 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/dwarf2out.h @@ -0,0 +1,470 @@ +/* dwarf2out.h - Various declarations for functions found in dwarf2out.cc + Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_DWARF2OUT_H +#define GCC_DWARF2OUT_H 1 + +#include "dwarf2.h" /* ??? Remove this once only used by dwarf2foo.c. */ + +typedef struct die_struct *dw_die_ref; +typedef const struct die_struct *const_dw_die_ref; + +typedef struct dw_val_node *dw_val_ref; +typedef struct dw_cfi_node *dw_cfi_ref; +typedef struct dw_loc_descr_node *dw_loc_descr_ref; +typedef struct dw_loc_list_struct *dw_loc_list_ref; +typedef struct dw_discr_list_node *dw_discr_list_ref; +typedef wide_int *wide_int_ptr; + + +/* Call frames are described using a sequence of Call Frame + Information instructions. 
The register number, offset + and address fields are provided as possible operands; + their use is selected by the opcode field. */ + +enum dw_cfi_oprnd_type { + dw_cfi_oprnd_unused, + dw_cfi_oprnd_reg_num, + dw_cfi_oprnd_offset, + dw_cfi_oprnd_addr, + dw_cfi_oprnd_loc, + dw_cfi_oprnd_cfa_loc +}; + +typedef union GTY(()) { + unsigned int GTY ((tag ("dw_cfi_oprnd_reg_num"))) dw_cfi_reg_num; + HOST_WIDE_INT GTY ((tag ("dw_cfi_oprnd_offset"))) dw_cfi_offset; + const char * GTY ((tag ("dw_cfi_oprnd_addr"))) dw_cfi_addr; + struct dw_loc_descr_node * GTY ((tag ("dw_cfi_oprnd_loc"))) dw_cfi_loc; + struct dw_cfa_location * GTY ((tag ("dw_cfi_oprnd_cfa_loc"))) + dw_cfi_cfa_loc; +} dw_cfi_oprnd; + +struct GTY(()) dw_cfi_node { + enum dwarf_call_frame_info dw_cfi_opc; + dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd1_desc (%1.dw_cfi_opc)"))) + dw_cfi_oprnd1; + dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd2_desc (%1.dw_cfi_opc)"))) + dw_cfi_oprnd2; +}; + + +typedef vec *cfi_vec; + +typedef struct dw_fde_node *dw_fde_ref; + +/* All call frame descriptions (FDE's) in the GCC generated DWARF + refer to a single Common Information Entry (CIE), defined at + the beginning of the .debug_frame section. This use of a single + CIE obviates the need to keep track of multiple CIE's + in the DWARF generation routines below. */ + +struct GTY(()) dw_fde_node { + tree decl; + const char *dw_fde_begin; + const char *dw_fde_current_label; + const char *dw_fde_end; + const char *dw_fde_vms_end_prologue; + const char *dw_fde_vms_begin_epilogue; + const char *dw_fde_second_begin; + const char *dw_fde_second_end; + cfi_vec dw_fde_cfi; + int dw_fde_switch_cfi_index; /* Last CFI before switching sections. */ + HOST_WIDE_INT stack_realignment; + + unsigned funcdef_number; + unsigned fde_index; + + /* Dynamic realign argument pointer register. */ + unsigned int drap_reg; + /* Virtual dynamic realign argument pointer register. */ + unsigned int vdrap_reg; + /* These 3 flags are copied from rtl_data in function.h. 
*/ + unsigned all_throwers_are_sibcalls : 1; + unsigned uses_eh_lsda : 1; + unsigned nothrow : 1; + /* Whether we did stack realign in this call frame. */ + unsigned stack_realign : 1; + /* Whether dynamic realign argument pointer register has been saved. */ + unsigned drap_reg_saved: 1; + /* True iff dw_fde_begin label is in text_section or cold_text_section. */ + unsigned in_std_section : 1; + /* True iff dw_fde_second_begin label is in text_section or + cold_text_section. */ + unsigned second_in_std_section : 1; + /* True if Rule 18 described in dwarf2cfi.cc is in action, i.e. for dynamic + stack realignment in between pushing of hard frame pointer to stack + and setting hard frame pointer to stack pointer. The register save for + hard frame pointer register should be emitted only on the latter + instruction. */ + unsigned rule18 : 1; + /* True if this function is to be ignored by debugger. */ + unsigned ignored_debug : 1; +}; + + +/* This represents a register, in DWARF_FRAME_REGNUM space, for use in CFA + definitions and expressions. + Most architectures only need a single register number, but some (amdgcn) + have pointers that span multiple registers. DWARF permits arbitrary + register sets but existing use-cases only require contiguous register + sets, as represented here. */ +struct GTY(()) cfa_reg { + unsigned int reg; + unsigned short span; + unsigned short span_width; /* A.K.A. register mode size. */ + + cfa_reg& set_by_dwreg (unsigned int r) + { + reg = r; + span = 1; + span_width = 0; /* Unknown size (permitted when span == 1). */ + return *this; + } + + bool operator== (const cfa_reg &other) const + { + return (reg == other.reg && span == other.span + && (span_width == other.span_width + || (span == 1 + && (span_width == 0 || other.span_width == 0)))); + } + + bool operator!= (const cfa_reg &other) const + { + return !(*this == other); + } +}; + +/* This is how we define the location of the CFA. 
We use to handle it + as REG + OFFSET all the time, but now it can be more complex. + It can now be either REG + CFA_OFFSET or *(REG + BASE_OFFSET) + CFA_OFFSET. + Instead of passing around REG and OFFSET, we pass a copy + of this structure. */ +struct GTY(()) dw_cfa_location { + poly_int64_pod offset; + poly_int64_pod base_offset; + /* REG is in DWARF_FRAME_REGNUM space, *not* normal REGNO space. */ + struct cfa_reg reg; + BOOL_BITFIELD indirect : 1; /* 1 if CFA is accessed via a dereference. */ + BOOL_BITFIELD in_use : 1; /* 1 if a saved cfa is stored here. */ +}; + + +/* Each DIE may have a series of attribute/value pairs. Values + can take on several forms. The forms that are used in this + implementation are listed below. */ + +enum dw_val_class +{ + dw_val_class_none, + dw_val_class_addr, + dw_val_class_offset, + dw_val_class_loc, + dw_val_class_loc_list, + dw_val_class_range_list, + dw_val_class_const, + dw_val_class_unsigned_const, + dw_val_class_const_double, + dw_val_class_wide_int, + dw_val_class_vec, + dw_val_class_flag, + dw_val_class_die_ref, + dw_val_class_fde_ref, + dw_val_class_lbl_id, + dw_val_class_lineptr, + dw_val_class_str, + dw_val_class_macptr, + dw_val_class_loclistsptr, + dw_val_class_file, + dw_val_class_data8, + dw_val_class_decl_ref, + dw_val_class_vms_delta, + dw_val_class_high_pc, + dw_val_class_discr_value, + dw_val_class_discr_list, + dw_val_class_const_implicit, + dw_val_class_unsigned_const_implicit, + dw_val_class_file_implicit, + dw_val_class_view_list, + dw_val_class_symview +}; + +/* Describe a floating point constant value, or a vector constant value. */ + +struct GTY(()) dw_vec_const { + void * GTY((atomic)) array; + unsigned length; + unsigned elt_size; +}; + +/* Describe a single value that a discriminant can match. + + Discriminants (in the "record variant part" meaning) are scalars. + dw_discr_list_ref and dw_discr_value are a mean to describe a set of + discriminant values that are matched by a particular variant. 
+ + Discriminants can be signed or unsigned scalars, and can be discriminants + values. Both have to be consistent, though. */ + +struct GTY(()) dw_discr_value { + int pos; /* Whether the discriminant value is positive (unsigned). */ + union + { + HOST_WIDE_INT GTY ((tag ("0"))) sval; + unsigned HOST_WIDE_INT GTY ((tag ("1"))) uval; + } + GTY ((desc ("%1.pos"))) v; +}; + +struct addr_table_entry; + +/* The dw_val_node describes an attribute's value, as it is + represented internally. */ + +struct GTY(()) dw_val_node { + enum dw_val_class val_class; + struct addr_table_entry * GTY(()) val_entry; + union dw_val_struct_union + { + rtx GTY ((tag ("dw_val_class_addr"))) val_addr; + unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_offset"))) val_offset; + dw_loc_list_ref GTY ((tag ("dw_val_class_loc_list"))) val_loc_list; + dw_die_ref GTY ((tag ("dw_val_class_view_list"))) val_view_list; + dw_loc_descr_ref GTY ((tag ("dw_val_class_loc"))) val_loc; + HOST_WIDE_INT GTY ((default)) val_int; + unsigned HOST_WIDE_INT + GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned; + double_int GTY ((tag ("dw_val_class_const_double"))) val_double; + wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide; + dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec; + struct dw_val_die_union + { + dw_die_ref die; + int external; + } GTY ((tag ("dw_val_class_die_ref"))) val_die_ref; + unsigned GTY ((tag ("dw_val_class_fde_ref"))) val_fde_index; + struct indirect_string_node * GTY ((tag ("dw_val_class_str"))) val_str; + char * GTY ((tag ("dw_val_class_lbl_id"))) val_lbl_id; + unsigned char GTY ((tag ("dw_val_class_flag"))) val_flag; + struct dwarf_file_data * GTY ((tag ("dw_val_class_file"))) val_file; + struct dwarf_file_data * + GTY ((tag ("dw_val_class_file_implicit"))) val_file_implicit; + unsigned char GTY ((tag ("dw_val_class_data8"))) val_data8[8]; + tree GTY ((tag ("dw_val_class_decl_ref"))) val_decl_ref; + struct dw_val_vms_delta_union + { + char * lbl1; + char * lbl2; + } GTY 
((tag ("dw_val_class_vms_delta"))) val_vms_delta; + dw_discr_value GTY ((tag ("dw_val_class_discr_value"))) val_discr_value; + dw_discr_list_ref GTY ((tag ("dw_val_class_discr_list"))) val_discr_list; + char * GTY ((tag ("dw_val_class_symview"))) val_symbolic_view; + } + GTY ((desc ("%1.val_class"))) v; +}; + +/* Locations in memory are described using a sequence of stack machine + operations. */ + +struct GTY((chain_next ("%h.dw_loc_next"))) dw_loc_descr_node { + dw_loc_descr_ref dw_loc_next; + ENUM_BITFIELD (dwarf_location_atom) dw_loc_opc : 8; + /* Used to distinguish DW_OP_addr with a direct symbol relocation + from DW_OP_addr with a dtp-relative symbol relocation. */ + unsigned int dtprel : 1; + /* For DW_OP_pick, DW_OP_dup and DW_OP_over operations: true iff. + it targets a DWARF prodecure argument. In this case, it needs to be + relocated according to the current frame offset. */ + unsigned int frame_offset_rel : 1; + int dw_loc_addr; + dw_val_node dw_loc_oprnd1; + dw_val_node dw_loc_oprnd2; +}; + +/* A variant (inside a record variant part) is selected when the corresponding + discriminant matches its set of values (see the comment for dw_discr_value). + The following datastructure holds such matching information. */ + +struct GTY(()) dw_discr_list_node { + dw_discr_list_ref dw_discr_next; + + dw_discr_value dw_discr_lower_bound; + dw_discr_value dw_discr_upper_bound; + /* This node represents only the value in dw_discr_lower_bound when it's + zero. It represents the range between the two fields (bounds included) + otherwise. */ + int dw_discr_range; +}; + +/* Interface from dwarf2out.cc to dwarf2cfi.cc. 
*/ +extern struct dw_loc_descr_node *build_cfa_loc + (dw_cfa_location *, poly_int64); +extern struct dw_loc_descr_node *build_cfa_aligned_loc + (dw_cfa_location *, poly_int64, HOST_WIDE_INT); +extern struct dw_loc_descr_node *build_span_loc (struct cfa_reg); +extern struct dw_loc_descr_node *mem_loc_descriptor + (rtx, machine_mode mode, machine_mode mem_mode, + enum var_init_status); +extern bool loc_descr_equal_p (dw_loc_descr_ref, dw_loc_descr_ref); +extern dw_fde_ref dwarf2out_alloc_current_fde (void); + +extern unsigned long size_of_locs (dw_loc_descr_ref); +extern void output_loc_sequence (dw_loc_descr_ref, int); +extern void output_loc_sequence_raw (dw_loc_descr_ref); + +/* Interface from dwarf2cfi.cc to dwarf2out.cc. */ +extern void lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, + dw_cfa_location *remember); +extern bool cfa_equal_p (const dw_cfa_location *, const dw_cfa_location *); + +extern void output_cfi (dw_cfi_ref, dw_fde_ref, int); + +extern GTY(()) cfi_vec cie_cfi_vec; + +/* Interface from dwarf2*.c to the rest of the compiler. 
*/ +extern enum dw_cfi_oprnd_type dw_cfi_oprnd1_desc + (enum dwarf_call_frame_info cfi); +extern enum dw_cfi_oprnd_type dw_cfi_oprnd2_desc + (enum dwarf_call_frame_info cfi); + +extern void output_cfi_directive (FILE *f, struct dw_cfi_node *cfi); + +extern void dwarf2out_emit_cfi (dw_cfi_ref cfi); + +extern void debug_dwarf (void); +struct die_struct; +extern void debug_dwarf_die (struct die_struct *); +extern void debug_dwarf_loc_descr (dw_loc_descr_ref); +extern void debug (die_struct &ref); +extern void debug (die_struct *ptr); +extern void dwarf2out_set_demangle_name_func (const char *(*) (const char *)); +#ifdef VMS_DEBUGGING_INFO +extern void dwarf2out_vms_debug_main_pointer (void); +#endif + +enum array_descr_ordering +{ + array_descr_ordering_default, + array_descr_ordering_row_major, + array_descr_ordering_column_major +}; + +#define DWARF2OUT_ARRAY_DESCR_INFO_MAX_DIMEN 16 + +struct array_descr_info +{ + int ndimensions; + enum array_descr_ordering ordering; + tree element_type; + tree base_decl; + tree data_location; + tree allocated; + tree associated; + tree stride; + tree rank; + bool stride_in_bits; + struct array_descr_dimen + { + /* GCC uses sizetype for array indices, so lower_bound and upper_bound + will likely be "sizetype" values. However, bounds may have another + type in the original source code. */ + tree bounds_type; + tree lower_bound; + tree upper_bound; + + /* Only Fortran uses more than one dimension for array types. For other + languages, the stride can be rather specified for the whole array. */ + tree stride; + } dimen[DWARF2OUT_ARRAY_DESCR_INFO_MAX_DIMEN]; +}; + +enum fixed_point_scale_factor +{ + fixed_point_scale_factor_binary, + fixed_point_scale_factor_decimal, + fixed_point_scale_factor_arbitrary +}; + +struct fixed_point_type_info +{ + /* The scale factor is the value one has to multiply the actual data with + to get the fixed point value. We support three ways to encode it. 
*/ + enum fixed_point_scale_factor scale_factor_kind; + union + { + /* For a binary scale factor, the scale factor is 2 ** binary. */ + int binary; + /* For a decimal scale factor, the scale factor is 10 ** decimal. */ + int decimal; + /* For an arbitrary scale factor, the scale factor is the ratio + numerator / denominator. */ + struct { tree numerator; tree denominator; } arbitrary; + } scale_factor; +}; + +void dwarf2out_cc_finalize (void); + +/* Some DWARF internals are exposed for the needs of DWARF-based debug + formats. */ + +/* Each DIE attribute has a field specifying the attribute kind, + a link to the next attribute in the chain, and an attribute value. + Attributes are typically linked below the DIE they modify. */ + +typedef struct GTY(()) dw_attr_struct { + enum dwarf_attribute dw_attr; + dw_val_node dw_attr_val; +} +dw_attr_node; + +extern dw_attr_node *get_AT (dw_die_ref, enum dwarf_attribute); +extern HOST_WIDE_INT AT_int (dw_attr_node *); +extern unsigned HOST_WIDE_INT AT_unsigned (dw_attr_node *a); +extern dw_loc_descr_ref AT_loc (dw_attr_node *); +extern dw_die_ref get_AT_ref (dw_die_ref, enum dwarf_attribute); +extern const char *get_AT_string (dw_die_ref, enum dwarf_attribute); +extern enum dw_val_class AT_class (dw_attr_node *); +extern unsigned HOST_WIDE_INT AT_unsigned (dw_attr_node *); +extern unsigned get_AT_unsigned (dw_die_ref, enum dwarf_attribute); +extern int get_AT_flag (dw_die_ref, enum dwarf_attribute); + +extern void add_name_attribute (dw_die_ref, const char *); + +extern dw_die_ref new_die_raw (enum dwarf_tag); +extern dw_die_ref base_type_die (tree, bool); + +extern dw_die_ref lookup_decl_die (tree); +extern dw_die_ref lookup_type_die (tree); + +extern dw_die_ref dw_get_die_child (dw_die_ref); +extern dw_die_ref dw_get_die_sib (dw_die_ref); +extern enum dwarf_tag dw_get_die_tag (dw_die_ref); + +/* Data about a single source file. 
*/ +struct GTY((for_user)) dwarf_file_data { + const char * key; + const char * filename; + int emitted_number; +}; + +extern struct dwarf_file_data *get_AT_file (dw_die_ref, + enum dwarf_attribute); + +#endif /* GCC_DWARF2OUT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/edit-context.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/edit-context.h new file mode 100644 index 0000000..383c181 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/edit-context.h @@ -0,0 +1,67 @@ +/* Determining the results of applying fix-it hints. + Copyright (C) 2016-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_EDIT_CONTEXT_H +#define GCC_EDIT_CONTEXT_H + +#include "typed-splay-tree.h" + +class edit_context; +class edited_file; + +/* A set of changes to the source code. + + The changes are "atomic" - if any changes can't be applied, + none of them can be (tracked by the m_valid flag). + Similarly, attempts to add the changes from a rich_location flagged + as containing invalid changes mean that the whole of the edit_context + is flagged as invalid. + + A complication here is that fix-its are expressed relative to coordinates + in the files when they were parsed, before any changes have been made, and + so if there's more that one fix-it to be applied, we have to adjust + later fix-its to allow for the changes made by earlier ones. 
This + is done by the various "get_effective_column" methods. */ + +class edit_context +{ + public: + edit_context (); + + bool valid_p () const { return m_valid; } + + void add_fixits (rich_location *richloc); + + char *get_content (const char *filename); + + int get_effective_column (const char *filename, int line, int column); + + char *generate_diff (bool show_filenames); + void print_diff (pretty_printer *pp, bool show_filenames); + + private: + bool apply_fixit (const fixit_hint *hint); + edited_file *get_file (const char *filename); + edited_file &get_or_insert_file (const char *filename); + + bool m_valid; + typed_splay_tree m_files; +}; + +#endif /* GCC_EDIT_CONTEXT_H. */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/emit-rtl.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/emit-rtl.h new file mode 100644 index 0000000..7a58fed --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/emit-rtl.h @@ -0,0 +1,548 @@ +/* Exported functions from emit-rtl.cc + Copyright (C) 2004-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_EMIT_RTL_H +#define GCC_EMIT_RTL_H + +class temp_slot; +typedef class temp_slot *temp_slot_p; +class predefined_function_abi; +namespace rtl_ssa { class function_info; } + +/* Information mainlined about RTL representation of incoming arguments. 
*/ +struct GTY(()) incoming_args { + /* Number of bytes of args popped by function being compiled on its return. + Zero if no bytes are to be popped. + May affect compilation of return insn or of function epilogue. */ + poly_int64_pod pops_args; + + /* If function's args have a fixed size, this is that size, in bytes. + Otherwise, it is -1. + May affect compilation of return insn or of function epilogue. */ + poly_int64_pod size; + + /* # bytes the prologue should push and pretend that the caller pushed them. + The prologue must do this, but only if parms can be passed in + registers. */ + int pretend_args_size; + + /* This is the offset from the arg pointer to the place where the first + anonymous arg can be found, if there is one. */ + rtx arg_offset_rtx; + + /* Quantities of various kinds of registers + used for the current function's args. */ + CUMULATIVE_ARGS info; + + /* The arg pointer hard register, or the pseudo into which it was copied. */ + rtx internal_arg_pointer; +}; + + +/* Datastructures maintained for currently processed function in RTL form. */ +struct GTY(()) rtl_data { + void init_stack_alignment (); + + struct expr_status expr; + struct emit_status emit; + struct varasm_status varasm; + struct incoming_args args; + struct function_subsections subsections; + struct rtl_eh eh; + + /* The ABI of the function, i.e. the interface it presents to its callers. + This is the ABI that should be queried to see which registers the + function needs to save before it uses them. + + Other functions (including those called by this function) might use + different ABIs. */ + const predefined_function_abi *GTY((skip)) abi; + + rtl_ssa::function_info *GTY((skip)) ssa; + + /* For function.cc */ + + /* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is + defined, the needed space is pushed by the prologue. */ + poly_int64_pod outgoing_args_size; + + /* If nonzero, an RTL expression for the location at which the current + function returns its result. 
If the current function returns its + result in a register, current_function_return_rtx will always be + the hard register containing the result. */ + rtx return_rtx; + + /* Vector of initial-value pairs. Each pair consists of a pseudo + register of approprite mode that stores the initial value a hard + register REGNO, and that hard register itself. */ + /* ??? This could be a VEC but there is currently no way to define an + opaque VEC type. */ + struct initial_value_struct *hard_reg_initial_vals; + + /* A variable living at the top of the frame that holds a known value. + Used for detecting stack clobbers. */ + tree stack_protect_guard; + + /* The __stack_chk_guard variable or expression holding the stack + protector canary value. */ + tree stack_protect_guard_decl; + + /* List (chain of INSN_LIST) of labels heading the current handlers for + nonlocal gotos. */ + rtx_insn_list *x_nonlocal_goto_handler_labels; + + /* Label that will go on function epilogue. + Jumping to this label serves as a "return" instruction + on machines which require execution of the epilogue on all returns. */ + rtx_code_label *x_return_label; + + /* Label that will go on the end of function epilogue. + Jumping to this label serves as a "naked return" instruction + on machines which require execution of the epilogue on all returns. */ + rtx_code_label *x_naked_return_label; + + /* List (chain of EXPR_LISTs) of all stack slots in this function. + Made for the sake of unshare_all_rtl. */ + vec *x_stack_slot_list; + + /* List of empty areas in the stack frame. */ + class frame_space *frame_space_list; + + /* Place after which to insert the tail_recursion_label if we need one. */ + rtx_note *x_stack_check_probe_note; + + /* Location at which to save the argument pointer if it will need to be + referenced. There are two cases where this is done: if nonlocal gotos + exist, or if vars stored at an offset from the argument pointer will be + needed by inner routines. 
*/ + rtx x_arg_pointer_save_area; + + /* Dynamic Realign Argument Pointer used for realigning stack. */ + rtx drap_reg; + + /* Offset to end of allocated area of stack frame. + If stack grows down, this is the address of the last stack slot allocated. + If stack grows up, this is the address for the next slot. */ + poly_int64_pod x_frame_offset; + + /* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */ + rtx_insn *x_parm_birth_insn; + + /* List of all used temporaries allocated, by level. */ + vec *x_used_temp_slots; + + /* List of available temp slots. */ + class temp_slot *x_avail_temp_slots; + + /* Current nesting level for temporaries. */ + int x_temp_slot_level; + + /* The largest alignment needed on the stack, including requirement + for outgoing stack alignment. */ + unsigned int stack_alignment_needed; + + /* Preferred alignment of the end of stack frame, which is preferred + to call other functions. */ + unsigned int preferred_stack_boundary; + + /* The minimum alignment of parameter stack. */ + unsigned int parm_stack_boundary; + + /* The largest alignment of slot allocated on the stack. */ + unsigned int max_used_stack_slot_alignment; + + /* The stack alignment estimated before reload, with consideration of + following factors: + 1. Alignment of local stack variables (max_used_stack_slot_alignment) + 2. Alignment requirement to call other functions + (preferred_stack_boundary) + 3. Alignment of non-local stack variables but might be spilled in + local stack. */ + unsigned int stack_alignment_estimated; + + /* How many NOP insns to place at each function entry by default. */ + unsigned short patch_area_size; + + /* How far the real asm entry point is into this area. */ + unsigned short patch_area_entry; + + /* For reorg. */ + + /* Nonzero if function being compiled called builtin_return_addr or + builtin_frame_address with nonzero count. */ + bool accesses_prior_frames; + + /* Nonzero if the function calls __builtin_eh_return. 
*/ + bool calls_eh_return; + + /* Nonzero if function saves all registers, e.g. if it has a nonlocal + label that can reach the exit block via non-exceptional paths. */ + bool saves_all_registers; + + /* Nonzero if function being compiled has nonlocal gotos to parent + function. */ + bool has_nonlocal_goto; + + /* Nonzero if function being compiled has an asm statement. */ + bool has_asm_statement; + + /* This bit is used by the exception handling logic. It is set if all + calls (if any) are sibling calls. Such functions do not have to + have EH tables generated, as they cannot throw. A call to such a + function, however, should be treated as throwing if any of its callees + can throw. */ + bool all_throwers_are_sibcalls; + + /* Nonzero if stack limit checking should be enabled in the current + function. */ + bool limit_stack; + + /* Nonzero if profiling code should be generated. */ + bool profile; + + /* Nonzero if the current function uses the constant pool. */ + bool uses_const_pool; + + /* Nonzero if the current function uses pic_offset_table_rtx. */ + bool uses_pic_offset_table; + + /* Nonzero if the current function needs an lsda for exception handling. */ + bool uses_eh_lsda; + + /* Set when the tail call has been produced. */ + bool tail_call_emit; + + /* Nonzero if code to initialize arg_pointer_save_area has been emitted. */ + bool arg_pointer_save_area_init; + + /* Nonzero if current function must be given a frame pointer. + Set in reload1.cc or lra-eliminations.cc if anything is allocated + on the stack there. */ + bool frame_pointer_needed; + + /* When set, expand should optimize for speed. */ + bool maybe_hot_insn_p; + + /* Nonzero if function stack realignment is needed. This flag may be + set twice: before and after reload. It is set before reload wrt + stack alignment estimation before reload. It will be changed after + reload if by then criteria of stack realignment is different. + The value set after reload is the accurate one and is finalized. 
*/ + bool stack_realign_needed; + + /* Nonzero if function stack realignment is tried. This flag is set + only once before reload. It affects register elimination. This + is used to generate DWARF debug info for stack variables. */ + bool stack_realign_tried; + + /* Nonzero if function being compiled needs dynamic realigned + argument pointer (drap) if stack needs realigning. */ + bool need_drap; + + /* Nonzero if function stack realignment estimation is done, namely + stack_realign_needed flag has been set before reload wrt estimated + stack alignment info. */ + bool stack_realign_processed; + + /* Nonzero if function stack realignment has been finalized, namely + stack_realign_needed flag has been set and finalized after reload. */ + bool stack_realign_finalized; + + /* True if dbr_schedule has already been called for this function. */ + bool dbr_scheduled_p; + + /* True if current function cannot throw. Unlike + TREE_NOTHROW (current_function_decl) it is set even for overwritable + function where currently compiled version of it is nothrow. */ + bool nothrow; + + /* True if we performed shrink-wrapping for the current function. */ + bool shrink_wrapped; + + /* True if we performed shrink-wrapping for separate components for + the current function. */ + bool shrink_wrapped_separate; + + /* Nonzero if function being compiled doesn't modify the stack pointer + (ignoring the prologue and epilogue). This is only valid after + pass_stack_ptr_mod has run. */ + bool sp_is_unchanging; + + /* True if the stack pointer is clobbered by asm statement. */ + bool sp_is_clobbered_by_asm; + + /* Nonzero if function being compiled doesn't contain any calls + (ignoring the prologue and epilogue). This is set prior to + register allocation in IRA and is valid for the remaining + compiler passes. */ + bool is_leaf; + + /* Nonzero if the function being compiled is a leaf function which only + uses leaf registers. 
This is valid after reload (specifically after + sched2) and is useful only if the port defines LEAF_REGISTERS. */ + bool uses_only_leaf_regs; + + /* Nonzero if the function being compiled has undergone hot/cold partitioning + (under flag_reorder_blocks_and_partition) and has at least one cold + block. */ + bool has_bb_partition; + + /* Nonzero if the function being compiled has completed the bb reordering + pass. */ + bool bb_reorder_complete; + + /* Like regs_ever_live, but 1 if a reg is set or clobbered from an + asm. Unlike regs_ever_live, elements of this array corresponding + to eliminable regs (like the frame pointer) are set if an asm + sets them. */ + HARD_REG_SET asm_clobbers; + + /* All hard registers that need to be zeroed at the return of the routine. */ + HARD_REG_SET must_be_zero_on_return; + + /* The highest address seen during shorten_branches. */ + int max_insn_address; +}; + +#define return_label (crtl->x_return_label) +#define naked_return_label (crtl->x_naked_return_label) +#define stack_slot_list (crtl->x_stack_slot_list) +#define parm_birth_insn (crtl->x_parm_birth_insn) +#define frame_offset (crtl->x_frame_offset) +#define stack_check_probe_note (crtl->x_stack_check_probe_note) +#define arg_pointer_save_area (crtl->x_arg_pointer_save_area) +#define used_temp_slots (crtl->x_used_temp_slots) +#define avail_temp_slots (crtl->x_avail_temp_slots) +#define temp_slot_level (crtl->x_temp_slot_level) +#define nonlocal_goto_handler_labels (crtl->x_nonlocal_goto_handler_labels) +#define frame_pointer_needed (crtl->frame_pointer_needed) +#define stack_realign_fp (crtl->stack_realign_needed && !crtl->need_drap) +#define stack_realign_drap (crtl->stack_realign_needed && crtl->need_drap) + +extern GTY(()) struct rtl_data x_rtl; + +/* Accessor to RTL datastructures. We keep them statically allocated now since + we never keep multiple functions. For threaded compiler we might however + want to do differently. 
*/ +#define crtl (&x_rtl) + +/* Return whether two MEM_ATTRs are equal. */ +bool mem_attrs_eq_p (const class mem_attrs *, const class mem_attrs *); + +/* Set the alias set of MEM to SET. */ +extern void set_mem_alias_set (rtx, alias_set_type); + +/* Set the alignment of MEM to ALIGN bits. */ +extern void set_mem_align (rtx, unsigned int); + +/* Set the address space of MEM to ADDRSPACE. */ +extern void set_mem_addr_space (rtx, addr_space_t); + +/* Set the expr for MEM to EXPR. */ +extern void set_mem_expr (rtx, tree); + +/* Set the offset for MEM to OFFSET. */ +extern void set_mem_offset (rtx, poly_int64); + +/* Clear the offset recorded for MEM. */ +extern void clear_mem_offset (rtx); + +/* Set the size for MEM to SIZE. */ +extern void set_mem_size (rtx, poly_int64); + +/* Clear the size recorded for MEM. */ +extern void clear_mem_size (rtx); + +/* Set the attributes for MEM appropriate for a spill slot. */ +extern void set_mem_attrs_for_spill (rtx); +extern tree get_spill_slot_decl (bool); + +/* Return a memory reference like MEMREF, but with its address changed to + ADDR. The caller is asserting that the actual piece of memory pointed + to is the same, just the form of the address is being changed, such as + by putting something into a register. */ +extern rtx replace_equiv_address (rtx, rtx, bool = false); + +/* Likewise, but the reference is not required to be valid. 
*/ +extern rtx replace_equiv_address_nv (rtx, rtx, bool = false); + +extern rtx gen_blockage (void); +extern rtvec gen_rtvec (int, ...); +extern rtx copy_insn_1 (rtx); +extern rtx copy_insn (rtx); +extern rtx_insn *copy_delay_slot_insn (rtx_insn *); +extern rtx gen_int_mode (poly_int64, machine_mode); +extern rtx_insn *emit_copy_of_insn_after (rtx_insn *, rtx_insn *); +extern void set_reg_attrs_from_value (rtx, rtx); +extern void set_reg_attrs_for_parm (rtx, rtx); +extern void set_reg_attrs_for_decl_rtl (tree t, rtx x); +extern void adjust_reg_mode (rtx, machine_mode); +extern int mem_expr_equal_p (const_tree, const_tree); +extern rtx gen_int_shift_amount (machine_mode, poly_int64); + +extern bool need_atomic_barrier_p (enum memmodel, bool); + +/* Return the current sequence. */ + +static inline struct sequence_stack * +get_current_sequence (void) +{ + return &crtl->emit.seq; +} + +/* Return the outermost sequence. */ + +static inline struct sequence_stack * +get_topmost_sequence (void) +{ + struct sequence_stack *seq, *top; + + seq = get_current_sequence (); + do + { + top = seq; + seq = seq->next; + } while (seq); + return top; +} + +/* Return the first insn of the current sequence or current function. */ + +static inline rtx_insn * +get_insns (void) +{ + return get_current_sequence ()->first; +} + +/* Specify a new insn as the first in the chain. */ + +static inline void +set_first_insn (rtx_insn *insn) +{ + gcc_checking_assert (!insn || !PREV_INSN (insn)); + get_current_sequence ()->first = insn; +} + +/* Return the last insn emitted in current sequence or current function. */ + +static inline rtx_insn * +get_last_insn (void) +{ + return get_current_sequence ()->last; +} + +/* Specify a new insn as the last in the chain. */ + +static inline void +set_last_insn (rtx_insn *insn) +{ + gcc_checking_assert (!insn || !NEXT_INSN (insn)); + get_current_sequence ()->last = insn; +} + +/* Return a number larger than any instruction's uid in this function. 
*/ + +static inline int +get_max_uid (void) +{ + return crtl->emit.x_cur_insn_uid; +} + +extern bool valid_for_const_vector_p (machine_mode, rtx); +extern rtx gen_const_vec_duplicate (machine_mode, rtx); +extern rtx gen_vec_duplicate (machine_mode, rtx); + +extern rtx gen_const_vec_series (machine_mode, rtx, rtx); +extern rtx gen_vec_series (machine_mode, rtx, rtx); + +extern void set_decl_incoming_rtl (tree, rtx, bool); + +/* Return a memory reference like MEMREF, but with its mode changed + to MODE and its address changed to ADDR. + (VOIDmode means don't change the mode. + NULL for ADDR means don't change the address.) */ +extern rtx change_address (rtx, machine_mode, rtx); + +/* Return a memory reference like MEMREF, but with its mode changed + to MODE and its address offset by OFFSET bytes. */ +#define adjust_address(MEMREF, MODE, OFFSET) \ + adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1, 0, 0) + +/* Likewise, but the reference is not required to be valid. */ +#define adjust_address_nv(MEMREF, MODE, OFFSET) \ + adjust_address_1 (MEMREF, MODE, OFFSET, 0, 1, 0, 0) + +/* Return a memory reference like MEMREF, but with its mode changed + to MODE and its address offset by OFFSET bytes. Assume that it's + for a bitfield and conservatively drop the underlying object if we + cannot be sure to stay within its bounds. */ +#define adjust_bitfield_address(MEMREF, MODE, OFFSET) \ + adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1, 1, 0) + +/* As for adjust_bitfield_address, but specify that the width of + BLKmode accesses is SIZE bytes. */ +#define adjust_bitfield_address_size(MEMREF, MODE, OFFSET, SIZE) \ + adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1, 1, SIZE) + +/* Likewise, but the reference is not required to be valid. 
*/ +#define adjust_bitfield_address_nv(MEMREF, MODE, OFFSET) \ + adjust_address_1 (MEMREF, MODE, OFFSET, 0, 1, 1, 0) + +/* Return a memory reference like MEMREF, but with its mode changed + to MODE and its address changed to ADDR, which is assumed to be + increased by OFFSET bytes from MEMREF. */ +#define adjust_automodify_address(MEMREF, MODE, ADDR, OFFSET) \ + adjust_automodify_address_1 (MEMREF, MODE, ADDR, OFFSET, 1) + +/* Likewise, but the reference is not required to be valid. */ +#define adjust_automodify_address_nv(MEMREF, MODE, ADDR, OFFSET) \ + adjust_automodify_address_1 (MEMREF, MODE, ADDR, OFFSET, 0) + +extern rtx adjust_address_1 (rtx, machine_mode, poly_int64, int, int, + int, poly_int64); +extern rtx adjust_automodify_address_1 (rtx, machine_mode, rtx, + poly_int64, int); + +/* Return a memory reference like MEMREF, but whose address is changed by + adding OFFSET, an RTX, to it. POW2 is the highest power of two factor + known to be in OFFSET (possibly 1). */ +extern rtx offset_address (rtx, rtx, unsigned HOST_WIDE_INT); + +/* Given REF, a MEM, and T, either the type of X or the expression + corresponding to REF, set the memory attributes. OBJECTP is nonzero + if we are making a new object of this type. */ +extern void set_mem_attributes (rtx, tree, int); + +/* Similar, except that BITPOS has not yet been applied to REF, so if + we alter MEM_OFFSET according to T then we should subtract BITPOS + expecting that it'll be added back in later. */ +extern void set_mem_attributes_minus_bitpos (rtx, tree, int, poly_int64); + +/* Return OFFSET if XEXP (MEM, 0) - OFFSET is known to be ALIGN + bits aligned for 0 <= OFFSET < ALIGN / BITS_PER_UNIT, or + -1 if not known. */ +extern int get_mem_align_offset (rtx, unsigned int); + +/* Return a memory reference like MEMREF, but with its mode widened to + MODE and adjusted by OFFSET. 
*/ +extern rtx widen_memory_access (rtx, machine_mode, poly_int64); + +extern void maybe_set_max_label_num (rtx_code_label *x); + +#endif /* GCC_EMIT_RTL_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/errors.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/errors.h new file mode 100644 index 0000000..994d9c2 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/errors.h @@ -0,0 +1,40 @@ +/* Basic error reporting routines. + Copyright (C) 1999-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* warning, error, and fatal. These definitions are suitable for use + in the generator programs; eventually we would like to use them in + cc1 too, but that's a longer term project. + + N.B. We cannot presently use ATTRIBUTE_PRINTF with these functions, + because they can be extended with additional format specifiers which + GCC does not know about. */ + +#ifndef GCC_ERRORS_H +#define GCC_ERRORS_H + +extern void warning (const char *, ...) ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD; +extern void error (const char *, ...) ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD; +extern void fatal (const char *, ...) ATTRIBUTE_NORETURN ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD; +extern void internal_error (const char *, ...) ATTRIBUTE_NORETURN ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD; +extern const char *trim_filename (const char *); + +extern int have_error; +extern const char *progname; + +#endif /* ! 
GCC_ERRORS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/escaped_string.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/escaped_string.h new file mode 100644 index 0000000..fd9a6fc --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/escaped_string.h @@ -0,0 +1,43 @@ +/* Shared escaped string class. + Copyright (C) 1999-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_ESCAPED_STRING_H +#define GCC_ESCAPED_STRING_H + +#include + +/* A class to handle converting a string that might contain + control characters, (eg newline, form-feed, etc), into one + in which contains escape sequences instead. */ + +class escaped_string +{ + public: + escaped_string () { m_owned = false; m_str = NULL; }; + ~escaped_string () { if (m_owned) free (m_str); } + operator const char *() const { return m_str; } + void escape (const char *); + private: + escaped_string(const escaped_string&) {} + escaped_string& operator=(const escaped_string&) { return *this; } + char *m_str; + bool m_owned; +}; + +#endif /* ! GCC_ESCAPED_STRING_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/et-forest.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/et-forest.h new file mode 100644 index 0000000..5dafea3 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/et-forest.h @@ -0,0 +1,85 @@ +/* Et-forest data structure implementation. 
+ Copyright (C) 2002-2022 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING3. If not see + . */ + +/* This package implements ET forest data structure. Each tree in + the structure maintains a tree structure and offers logarithmic time + for tree operations (insertion and removal of nodes and edges) and + poly-logarithmic time for nearest common ancestor. + + ET tree stores its structure as a sequence of symbols obtained + by dfs(root) + + dfs (node) + { + s = node; + for each child c of node do + s = concat (s, c, node); + return s; + } + + For example for tree + + 1 + / | \ + 2 3 4 + / | + 4 5 + + the sequence is 1 2 4 2 5 3 1 3 1 4 1. + + The sequence is stored in a slightly modified splay tree. + In order to support various types of node values, a hashtable + is used to convert node values to the internal representation. */ + +#ifndef _ET_TREE_H +#define _ET_TREE_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* The node representing the node in an et tree. */ +struct et_node +{ + void *data; /* The data represented by the node. */ + + int dfs_num_in, dfs_num_out; /* Number of the node in the dfs ordering. */ + + struct et_node *father; /* Father of the node. */ + struct et_node *son; /* The first of the sons of the node. */ + struct et_node *left; + struct et_node *right; /* The brothers of the node. 
*/ + + struct et_occ *rightmost_occ; /* The rightmost occurrence. */ + struct et_occ *parent_occ; /* The occurrence of the parent node. */ +}; + +struct et_node *et_new_tree (void *data); +void et_free_tree (struct et_node *); +void et_free_tree_force (struct et_node *); +void et_free_pools (void); +void et_set_father (struct et_node *, struct et_node *); +void et_split (struct et_node *); +struct et_node *et_nca (struct et_node *, struct et_node *); +bool et_below (struct et_node *, struct et_node *); +struct et_node *et_root (struct et_node *); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _ET_TREE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/except.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/except.h new file mode 100644 index 0000000..b7fd0f4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/except.h @@ -0,0 +1,334 @@ +/* Exception Handling interface routines. + Copyright (C) 1996-2022 Free Software Foundation, Inc. + Contributed by Mike Stump . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* No include guards here, but define an include file marker anyway, so + that the compiler can keep track of where this file is included. This + is e.g. used to avoid including this file in front-end specific files. */ +#ifndef GCC_EXCEPT_H +#define GCC_EXCEPT_H + + +struct function; +struct eh_region_d; + +/* The type of an exception region. 
*/ +enum eh_region_type +{ + /* CLEANUP regions implement e.g. destructors run when exiting a block. + They can be generated from both GIMPLE_TRY_FINALLY and GIMPLE_TRY_CATCH + nodes. It is expected by the runtime that cleanup regions will *not* + resume normal program flow, but will continue propagation of the + exception. */ + ERT_CLEANUP, + + /* TRY regions implement catching an exception. The list of types associated + with the attached catch handlers is examined in order by the runtime and + control is transferred to the appropriate handler. Note that a NULL type + list is a catch-all handler, and that it will catch *all* exceptions + including those originating from a different language. */ + ERT_TRY, + + /* ALLOWED_EXCEPTIONS regions implement exception filtering, e.g. the + throw(type-list) specification that can be added to C++ functions. + The runtime examines the thrown exception vs the type list, and if + the exception does not match, transfers control to the handler. The + normal handler for C++ calls __cxa_call_unexpected. */ + ERT_ALLOWED_EXCEPTIONS, + + /* MUST_NOT_THROW regions prevent all exceptions from propagating. This + region type is used in C++ to surround destructors being run inside a + CLEANUP region. This differs from an ALLOWED_EXCEPTIONS region with + an empty type list in that the runtime is prepared to terminate the + program directly. We only generate code for MUST_NOT_THROW regions + along control paths that are already handling an exception within the + current function. */ + ERT_MUST_NOT_THROW +}; + + +/* A landing pad for a given exception region. Any transfer of control + from the EH runtime to the function happens at a landing pad. */ + +struct GTY(()) eh_landing_pad_d +{ + /* The linked list of all landing pads associated with the region. */ + struct eh_landing_pad_d *next_lp; + + /* The region with which this landing pad is associated. 
*/ + struct eh_region_d *region; + + /* At the gimple level, the location to which control will be transferred + for this landing pad. There can be both EH and normal edges into the + block containing the post-landing-pad label. */ + tree post_landing_pad; + + /* At the rtl level, the location to which the runtime will transfer + control. This differs from the post-landing-pad in that the target's + EXCEPTION_RECEIVER pattern will be expanded here, as well as other + bookkeeping specific to exceptions. There must not be normal edges + into the block containing the landing-pad label. */ + rtx_code_label *landing_pad; + + /* The index of this landing pad within fun->eh->lp_array. */ + int index; +}; + +/* A catch handler associated with an ERT_TRY region. */ + +struct GTY(()) eh_catch_d +{ + /* The double-linked list of all catch handlers for the region. */ + struct eh_catch_d *next_catch; + struct eh_catch_d *prev_catch; + + /* A TREE_LIST of runtime type objects that this catch handler + will catch, or NULL if all exceptions are caught. */ + tree type_list; + + /* A TREE_LIST of INTEGER_CSTs that correspond to the type_list entries, + having been mapped by assign_filter_values. These integers are to be + compared against the __builtin_eh_filter value. */ + tree filter_list; + + /* The code that should be executed if this catch handler matches the + thrown exception. This label is only maintained until + pass_lower_eh_dispatch, at which point it is cleared. */ + tree label; +}; + +/* Describes one exception region. */ + +struct GTY(()) eh_region_d +{ + /* The immediately surrounding region. */ + struct eh_region_d *outer; + + /* The list of immediately contained regions. */ + struct eh_region_d *inner; + struct eh_region_d *next_peer; + + /* The index of this region within fun->eh->region_array. */ + int index; + + /* Each region does exactly one thing. */ + enum eh_region_type type; + + /* Holds the action to perform based on the preceding type. 
*/ + union eh_region_u { + struct eh_region_u_try { + /* The double-linked list of all catch handlers for this region. */ + struct eh_catch_d *first_catch; + struct eh_catch_d *last_catch; + } GTY ((tag ("ERT_TRY"))) eh_try; + + struct eh_region_u_allowed { + /* A TREE_LIST of runtime type objects allowed to pass. */ + tree type_list; + /* The code that should be executed if the thrown exception does + not match the type list. This label is only maintained until + pass_lower_eh_dispatch, at which point it is cleared. */ + tree label; + /* The integer that will be passed by the runtime to signal that + we should execute the code at LABEL. This integer is assigned + by assign_filter_values and is to be compared against the + __builtin_eh_filter value. */ + int filter; + } GTY ((tag ("ERT_ALLOWED_EXCEPTIONS"))) allowed; + + struct eh_region_u_must_not_throw { + /* A function decl to be invoked if this region is actually reachable + from within the function, rather than implementable from the runtime. + The normal way for this to happen is for there to be a CLEANUP region + contained within this MUST_NOT_THROW region. Note that if the + runtime handles the MUST_NOT_THROW region, we have no control over + what termination function is called; it will be decided by the + personality function in effect for this CIE. */ + tree failure_decl; + /* The location assigned to the call of FAILURE_DECL, if expanded. */ + location_t failure_loc; + } GTY ((tag ("ERT_MUST_NOT_THROW"))) must_not_throw; + } GTY ((desc ("%0.type"))) u; + + /* The list of landing pads associated with this region. */ + struct eh_landing_pad_d *landing_pads; + + /* EXC_PTR and FILTER values copied from the runtime for this region. + Each region gets its own psuedos so that if there are nested exceptions + we do not overwrite the values of the first exception. */ + rtx exc_ptr_reg, filter_reg; + + /* True if this region should use __cxa_end_cleanup instead + of _Unwind_Resume. 
*/ + bool use_cxa_end_cleanup; +}; + +typedef struct eh_landing_pad_d *eh_landing_pad; +typedef struct eh_catch_d *eh_catch; +typedef struct eh_region_d *eh_region; + + + + +/* The exception status for each function. */ + +struct GTY(()) eh_status +{ + /* The tree of all regions for this function. */ + eh_region region_tree; + + /* The same information as an indexable array. */ + vec *region_array; + + /* The landing pads as an indexable array. */ + vec *lp_array; + + /* At the gimple level, a mapping from gimple statement to landing pad + or must-not-throw region. See record_stmt_eh_region. */ + hash_map *GTY(()) throw_stmt_table; + + /* All of the runtime type data used by the function. These objects + are emitted to the lang-specific-data-area for the function. */ + vec *ttype_data; + + /* The table of all action chains. These encode the eh_region tree in + a compact form for use by the runtime, and is also emitted to the + lang-specific-data-area. Note that the ARM EABI uses a different + format for the encoding than all other ports. */ + union eh_status_u { + vec *GTY((tag ("1"))) arm_eabi; + vec *GTY((tag ("0"))) other; + } GTY ((desc ("targetm.arm_eabi_unwinder"))) ehspec_data; +}; + + +/* Invokes CALLBACK for every exception handler label. Only used by old + loop hackery; should not be used by new code. 
*/ +extern void for_each_eh_label (void (*) (rtx)); + +extern void init_eh_for_function (void); + +extern void remove_eh_landing_pad (eh_landing_pad); +extern void remove_eh_handler (eh_region); +extern void remove_unreachable_eh_regions (sbitmap); + +extern bool current_function_has_exception_handlers (void); +extern void output_function_exception_table (int); + +extern rtx expand_builtin_eh_pointer (tree); +extern rtx expand_builtin_eh_filter (tree); +extern rtx expand_builtin_eh_copy_values (tree); +extern void expand_builtin_unwind_init (void); +extern rtx expand_builtin_eh_return_data_regno (tree); +extern rtx expand_builtin_extract_return_addr (tree); +extern void expand_builtin_init_dwarf_reg_sizes (tree); +extern rtx expand_builtin_frob_return_addr (tree); +extern rtx expand_builtin_dwarf_sp_column (void); +extern void expand_builtin_eh_return (tree, tree); +extern void expand_eh_return (void); +extern rtx expand_builtin_extend_pointer (tree); + +typedef tree (*duplicate_eh_regions_map) (tree, void *); +extern hash_map *duplicate_eh_regions + (struct function *, eh_region, int, duplicate_eh_regions_map, void *); + +extern void sjlj_emit_function_exit_after (rtx_insn *); +extern void update_sjlj_context (void); + +extern eh_region gen_eh_region_cleanup (eh_region); +extern eh_region gen_eh_region_try (eh_region); +extern eh_region gen_eh_region_allowed (eh_region, tree); +extern eh_region gen_eh_region_must_not_throw (eh_region); + +extern eh_catch gen_eh_region_catch (eh_region, tree); +extern eh_landing_pad gen_eh_landing_pad (eh_region); + +extern eh_region get_eh_region_from_number_fn (struct function *, int); +extern eh_region get_eh_region_from_number (int); +extern eh_landing_pad get_eh_landing_pad_from_number_fn (struct function*,int); +extern eh_landing_pad get_eh_landing_pad_from_number (int); +extern eh_region get_eh_region_from_lp_number_fn (struct function *, int); +extern eh_region get_eh_region_from_lp_number (int); + +extern eh_region 
eh_region_outermost (struct function *, eh_region, eh_region); + +extern void make_reg_eh_region_note (rtx_insn *insn, int ecf_flags, int lp_nr); +extern void make_reg_eh_region_note_nothrow_nononlocal (rtx_insn *); + +extern void verify_eh_tree (struct function *); +extern void dump_eh_tree (FILE *, struct function *); +void debug_eh_tree (struct function *); +extern void add_type_for_runtime (tree); +extern tree lookup_type_for_runtime (tree); +extern void assign_filter_values (void); + +extern eh_region get_eh_region_from_rtx (const_rtx); +extern eh_landing_pad get_eh_landing_pad_from_rtx (const_rtx); + +extern void finish_eh_generation (void); + +struct GTY(()) throw_stmt_node { + gimple *stmt; + int lp_nr; +}; + +extern hash_map *get_eh_throw_stmt_table (struct function *); +extern void set_eh_throw_stmt_table (function *, hash_map *); + +enum eh_personality_kind { + eh_personality_none, + eh_personality_any, + eh_personality_lang +}; + +extern enum eh_personality_kind +function_needs_eh_personality (struct function *); + +/* Pre-order iteration within the eh_region tree. 
*/ + +static inline eh_region +ehr_next (eh_region r, eh_region start) +{ + if (r->inner) + r = r->inner; + else if (r->next_peer && r != start) + r = r->next_peer; + else + { + do + { + r = r->outer; + if (r == start) + return NULL; + } + while (r->next_peer == NULL); + r = r->next_peer; + } + return r; +} + +#define FOR_ALL_EH_REGION_AT(R, START) \ + for ((R) = (START); (R) != NULL; (R) = ehr_next (R, START)) + +#define FOR_ALL_EH_REGION_FN(R, FN) \ + for ((R) = (FN)->eh->region_tree; (R) != NULL; (R) = ehr_next (R, NULL)) + +#define FOR_ALL_EH_REGION(R) FOR_ALL_EH_REGION_FN (R, cfun) + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/explow.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/explow.h new file mode 100644 index 0000000..2b9f7e4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/explow.h @@ -0,0 +1,143 @@ +/* Export function prototypes from explow.cc. + Copyright (C) 2015-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_EXPLOW_H +#define GCC_EXPLOW_H + +/* Return a memory reference like MEMREF, but which is known to have a + valid address. */ +extern rtx validize_mem (rtx); + +extern rtx use_anchored_address (rtx); + +/* Copy given rtx to a new temp reg and return that. */ +extern rtx copy_to_reg (rtx); + +/* Like copy_to_reg but always make the reg Pmode. 
*/ +extern rtx copy_addr_to_reg (rtx); + +/* Like copy_to_reg but always make the reg the specified mode MODE. */ +extern rtx copy_to_mode_reg (machine_mode, rtx); + +/* Copy given rtx to given temp reg and return that. */ +extern rtx copy_to_suggested_reg (rtx, rtx, machine_mode); + +/* Copy a value to a register if it isn't already a register. + Args are mode (in case value is a constant) and the value. */ +extern rtx force_reg (machine_mode, rtx); + +/* Return given rtx, copied into a new temp reg if it was in memory. */ +extern rtx force_not_mem (rtx); + +/* Return mode and signedness to use when an argument or result in the + given mode is promoted. */ +extern machine_mode promote_function_mode (const_tree, machine_mode, int *, + const_tree, int); + +/* Return mode and signedness to use when an object in the given mode + is promoted. */ +extern machine_mode promote_mode (const_tree, machine_mode, int *); + +/* Return mode and signedness to use when object is promoted. */ +machine_mode promote_decl_mode (const_tree, int *); + +/* Return mode and signedness to use when object is promoted. */ +machine_mode promote_ssa_mode (const_tree, int *); + +/* Remove some bytes from the stack. An rtx says how many. */ +extern void adjust_stack (rtx); + +/* Add some bytes to the stack. An rtx says how many. */ +extern void anti_adjust_stack (rtx); + +/* Add some bytes to the stack while probing it. An rtx says how many. */ +extern void anti_adjust_stack_and_probe (rtx, bool); + +/* Add some bytes to the stack while probing it. An rtx says how + many. Add additional probes to prevent stack clashing attacks. */ +extern void anti_adjust_stack_and_probe_stack_clash (rtx); + +/* Support for building allocation/probing loops for stack-clash + protection of dyamically allocated stack space. 
*/ +extern void compute_stack_clash_protection_loop_data (rtx *, rtx *, rtx *, + HOST_WIDE_INT *, rtx); +extern void emit_stack_clash_protection_probe_loop_start (rtx *, rtx *, + rtx, bool); +extern void emit_stack_clash_protection_probe_loop_end (rtx, rtx, + rtx, bool); + +/* This enum is used for the following two functions. */ +enum save_level {SAVE_BLOCK, SAVE_FUNCTION, SAVE_NONLOCAL}; + +/* Save the stack pointer at the specified level. */ +extern void emit_stack_save (enum save_level, rtx *); + +/* Restore the stack pointer from a save area of the specified level. */ +extern void emit_stack_restore (enum save_level, rtx); + +/* Invoke emit_stack_save for the nonlocal_goto_save_area. */ +extern void update_nonlocal_goto_save_area (void); + +/* Record a new stack level. */ +extern void record_new_stack_level (void); + +/* Allocate some space on the stack dynamically and return its address. */ +extern rtx allocate_dynamic_stack_space (rtx, unsigned, unsigned, + HOST_WIDE_INT, bool); + +/* Calculate the necessary size of a constant dynamic stack allocation from the + size of the variable area. */ +extern void get_dynamic_stack_size (rtx *, unsigned, unsigned, HOST_WIDE_INT *); + +/* Returns the address of the dynamic stack space without allocating it. */ +extern rtx get_dynamic_stack_base (poly_int64, unsigned, rtx); + +/* Return an rtx doing runtime alignment to REQUIRED_ALIGN on TARGET. */ +extern rtx align_dynamic_address (rtx, unsigned); + +/* Emit one stack probe at ADDRESS, an address within the stack. */ +extern void emit_stack_probe (rtx); + +/* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive. + FIRST is a constant and size is a Pmode RTX. These are offsets from + the current stack pointer. STACK_GROWS_DOWNWARD says whether to add + or subtract them from the stack pointer. */ +extern void probe_stack_range (HOST_WIDE_INT, rtx); + +/* Return an rtx that refers to the value returned by a library call + in its original home. 
This becomes invalid if any more code is emitted. */ +extern rtx hard_libcall_value (machine_mode, rtx); + +/* Return an rtx that refers to the value returned by a function + in its original home. This becomes invalid if any more code is emitted. */ +extern rtx hard_function_value (const_tree, const_tree, const_tree, int); + +/* Convert arg to a valid memory address for specified machine mode that points + to a specific named address space, by emitting insns to perform arithmetic + if necessary. */ +extern rtx memory_address_addr_space (machine_mode, rtx, addr_space_t); + +extern rtx eliminate_constant_term (rtx, rtx *); + +/* Like memory_address_addr_space, except assume the memory address points to + the generic named address space. */ +#define memory_address(MODE,RTX) \ + memory_address_addr_space ((MODE), (RTX), ADDR_SPACE_GENERIC) + +#endif /* GCC_EXPLOW_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/expmed.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/expmed.h new file mode 100644 index 0000000..ee1ddc8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/expmed.h @@ -0,0 +1,726 @@ +/* Target-dependent costs for expmed.cc. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef EXPMED_H +#define EXPMED_H 1 + +#include "insn-codes.h" + +enum alg_code { + alg_unknown, + alg_zero, + alg_m, alg_shift, + alg_add_t_m2, + alg_sub_t_m2, + alg_add_factor, + alg_sub_factor, + alg_add_t2_m, + alg_sub_t2_m, + alg_impossible +}; + +/* Indicates the type of fixup needed after a constant multiplication. + BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that + the result should be negated, and ADD_VARIANT means that the + multiplicand should be added to the result. */ +enum mult_variant {basic_variant, negate_variant, add_variant}; + +bool choose_mult_variant (machine_mode, HOST_WIDE_INT, + struct algorithm *, enum mult_variant *, int); + +/* This structure holds the "cost" of a multiply sequence. The + "cost" field holds the total rtx_cost of every operator in the + synthetic multiplication sequence, hence cost(a op b) is defined + as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero. + The "latency" field holds the minimum possible latency of the + synthetic multiply, on a hypothetical infinitely parallel CPU. + This is the critical path, or the maximum height, of the expression + tree which is the sum of rtx_costs on the most expensive path from + any leaf to the root. Hence latency(a op b) is defined as zero for + leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */ + +struct mult_cost { + short cost; /* Total rtx_cost of the multiplication sequence. */ + short latency; /* The latency of the multiplication sequence. */ +}; + +/* This macro is used to compare a pointer to a mult_cost against an + single integer "rtx_cost" value. This is equivalent to the macro + CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */ +#define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \ + || ((X)->cost == (Y) && (X)->latency < (Y))) + +/* This macro is used to compare two pointers to mult_costs against + each other. The macro returns true if X is cheaper than Y. 
+ Currently, the cheaper of two mult_costs is the one with the + lower "cost". If "cost"s are tied, the lower latency is cheaper. */ +#define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \ + || ((X)->cost == (Y)->cost \ + && (X)->latency < (Y)->latency)) + +/* This structure records a sequence of operations. + `ops' is the number of operations recorded. + `cost' is their total cost. + The operations are stored in `op' and the corresponding + logarithms of the integer coefficients in `log'. + + These are the operations: + alg_zero total := 0; + alg_m total := multiplicand; + alg_shift total := total * coeff + alg_add_t_m2 total := total + multiplicand * coeff; + alg_sub_t_m2 total := total - multiplicand * coeff; + alg_add_factor total := total * coeff + total; + alg_sub_factor total := total * coeff - total; + alg_add_t2_m total := total * coeff + multiplicand; + alg_sub_t2_m total := total * coeff - multiplicand; + + The first operand must be either alg_zero or alg_m. */ + +struct algorithm +{ + struct mult_cost cost; + short ops; + /* The size of the OP and LOG fields are not directly related to the + word size, but the worst-case algorithms will be if we have few + consecutive ones or zeros, i.e., a multiplicand like 10101010101... + In that case we will generate shift-by-2, add, shift-by-2, add,..., + in total wordsize operations. */ + enum alg_code op[MAX_BITS_PER_WORD]; + char log[MAX_BITS_PER_WORD]; +}; + +/* The entry for our multiplication cache/hash table. */ +struct alg_hash_entry { + /* The number we are multiplying by. */ + unsigned HOST_WIDE_INT t; + + /* The mode in which we are multiplying something by T. */ + machine_mode mode; + + /* The best multiplication algorithm for t. */ + enum alg_code alg; + + /* The cost of multiplication if ALG_CODE is not alg_impossible. + Otherwise, the cost within which multiplication by T is + impossible. */ + struct mult_cost cost; + + /* Optimized for speed? 
*/ + bool speed; +}; + +/* The number of cache/hash entries. */ +#if HOST_BITS_PER_WIDE_INT == 64 +#define NUM_ALG_HASH_ENTRIES 1031 +#else +#define NUM_ALG_HASH_ENTRIES 307 +#endif + +#define NUM_MODE_IP_INT (NUM_MODE_INT + NUM_MODE_PARTIAL_INT) +#define NUM_MODE_IPV_INT (NUM_MODE_IP_INT + NUM_MODE_VECTOR_INT) + +struct expmed_op_cheap { + bool cheap[2][NUM_MODE_IPV_INT]; +}; + +struct expmed_op_costs { + int cost[2][NUM_MODE_IPV_INT]; +}; + +/* Target-dependent globals. */ +struct target_expmed { + /* Each entry of ALG_HASH caches alg_code for some integer. This is + actually a hash table. If we have a collision, that the older + entry is kicked out. */ + struct alg_hash_entry x_alg_hash[NUM_ALG_HASH_ENTRIES]; + + /* True if x_alg_hash might already have been used. */ + bool x_alg_hash_used_p; + + /* Nonzero means divides or modulus operations are relatively cheap for + powers of two, so don't use branches; emit the operation instead. + Usually, this will mean that the MD file will emit non-branch + sequences. */ + struct expmed_op_cheap x_sdiv_pow2_cheap; + struct expmed_op_cheap x_smod_pow2_cheap; + + /* Cost of various pieces of RTL. Note that some of these are indexed by + shift count and some by mode. */ + int x_zero_cost[2]; + struct expmed_op_costs x_add_cost; + struct expmed_op_costs x_neg_cost; + struct expmed_op_costs x_shift_cost[MAX_BITS_PER_WORD]; + struct expmed_op_costs x_shiftadd_cost[MAX_BITS_PER_WORD]; + struct expmed_op_costs x_shiftsub0_cost[MAX_BITS_PER_WORD]; + struct expmed_op_costs x_shiftsub1_cost[MAX_BITS_PER_WORD]; + struct expmed_op_costs x_mul_cost; + struct expmed_op_costs x_sdiv_cost; + struct expmed_op_costs x_udiv_cost; + int x_mul_widen_cost[2][NUM_MODE_INT]; + int x_mul_highpart_cost[2][NUM_MODE_INT]; + + /* Conversion costs are only defined between two scalar integer modes + of different sizes. The first machine mode is the destination mode, + and the second is the source mode. 
*/ + int x_convert_cost[2][NUM_MODE_IP_INT][NUM_MODE_IP_INT]; +}; + +extern struct target_expmed default_target_expmed; +#if SWITCHABLE_TARGET +extern struct target_expmed *this_target_expmed; +#else +#define this_target_expmed (&default_target_expmed) +#endif + +/* Return a pointer to the alg_hash_entry at IDX. */ + +static inline struct alg_hash_entry * +alg_hash_entry_ptr (int idx) +{ + return &this_target_expmed->x_alg_hash[idx]; +} + +/* Return true if the x_alg_hash field might have been used. */ + +static inline bool +alg_hash_used_p (void) +{ + return this_target_expmed->x_alg_hash_used_p; +} + +/* Set whether the x_alg_hash field might have been used. */ + +static inline void +set_alg_hash_used_p (bool usedp) +{ + this_target_expmed->x_alg_hash_used_p = usedp; +} + +/* Compute an index into the cost arrays by mode class. */ + +static inline int +expmed_mode_index (machine_mode mode) +{ + switch (GET_MODE_CLASS (mode)) + { + case MODE_INT: + return mode - MIN_MODE_INT; + case MODE_PARTIAL_INT: + /* If there are no partial integer modes, help the compiler + to figure out this will never happen. See PR59934. */ + if (MIN_MODE_PARTIAL_INT != VOIDmode) + return mode - MIN_MODE_PARTIAL_INT + NUM_MODE_INT; + break; + case MODE_VECTOR_INT: + /* If there are no vector integer modes, help the compiler + to figure out this will never happen. See PR59934. */ + if (MIN_MODE_VECTOR_INT != VOIDmode) + return mode - MIN_MODE_VECTOR_INT + NUM_MODE_IP_INT; + break; + default: + break; + } + gcc_unreachable (); +} + +/* Return a pointer to a boolean contained in EOC indicating whether + a particular operation performed in MODE is cheap when optimizing + for SPEED. 
*/ + +static inline bool * +expmed_op_cheap_ptr (struct expmed_op_cheap *eoc, bool speed, + machine_mode mode) +{ + int idx = expmed_mode_index (mode); + return &eoc->cheap[speed][idx]; +} + +/* Return a pointer to a cost contained in COSTS when a particular + operation is performed in MODE when optimizing for SPEED. */ + +static inline int * +expmed_op_cost_ptr (struct expmed_op_costs *costs, bool speed, + machine_mode mode) +{ + int idx = expmed_mode_index (mode); + return &costs->cost[speed][idx]; +} + +/* Subroutine of {set_,}sdiv_pow2_cheap. Not to be used otherwise. */ + +static inline bool * +sdiv_pow2_cheap_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cheap_ptr (&this_target_expmed->x_sdiv_pow2_cheap, + speed, mode); +} + +/* Set whether a signed division by a power of 2 is cheap in MODE + when optimizing for SPEED. */ + +static inline void +set_sdiv_pow2_cheap (bool speed, machine_mode mode, bool cheap_p) +{ + *sdiv_pow2_cheap_ptr (speed, mode) = cheap_p; +} + +/* Return whether a signed division by a power of 2 is cheap in MODE + when optimizing for SPEED. */ + +static inline bool +sdiv_pow2_cheap (bool speed, machine_mode mode) +{ + return *sdiv_pow2_cheap_ptr (speed, mode); +} + +/* Subroutine of {set_,}smod_pow2_cheap. Not to be used otherwise. */ + +static inline bool * +smod_pow2_cheap_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cheap_ptr (&this_target_expmed->x_smod_pow2_cheap, + speed, mode); +} + +/* Set whether a signed modulo by a power of 2 is CHEAP in MODE when + optimizing for SPEED. */ + +static inline void +set_smod_pow2_cheap (bool speed, machine_mode mode, bool cheap) +{ + *smod_pow2_cheap_ptr (speed, mode) = cheap; +} + +/* Return whether a signed modulo by a power of 2 is cheap in MODE + when optimizing for SPEED. */ + +static inline bool +smod_pow2_cheap (bool speed, machine_mode mode) +{ + return *smod_pow2_cheap_ptr (speed, mode); +} + +/* Subroutine of {set_,}zero_cost. Not to be used otherwise. 
*/ + +static inline int * +zero_cost_ptr (bool speed) +{ + return &this_target_expmed->x_zero_cost[speed]; +} + +/* Set the COST of loading zero when optimizing for SPEED. */ + +static inline void +set_zero_cost (bool speed, int cost) +{ + *zero_cost_ptr (speed) = cost; +} + +/* Return the COST of loading zero when optimizing for SPEED. */ + +static inline int +zero_cost (bool speed) +{ + return *zero_cost_ptr (speed); +} + +/* Subroutine of {set_,}add_cost. Not to be used otherwise. */ + +static inline int * +add_cost_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_add_cost, speed, mode); +} + +/* Set the COST of computing an add in MODE when optimizing for SPEED. */ + +static inline void +set_add_cost (bool speed, machine_mode mode, int cost) +{ + *add_cost_ptr (speed, mode) = cost; +} + +/* Return the cost of computing an add in MODE when optimizing for SPEED. */ + +static inline int +add_cost (bool speed, machine_mode mode) +{ + return *add_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}neg_cost. Not to be used otherwise. */ + +static inline int * +neg_cost_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_neg_cost, speed, mode); +} + +/* Set the COST of computing a negation in MODE when optimizing for SPEED. */ + +static inline void +set_neg_cost (bool speed, machine_mode mode, int cost) +{ + *neg_cost_ptr (speed, mode) = cost; +} + +/* Return the cost of computing a negation in MODE when optimizing for + SPEED. */ + +static inline int +neg_cost (bool speed, machine_mode mode) +{ + return *neg_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}shift_cost. Not to be used otherwise. */ + +static inline int * +shift_cost_ptr (bool speed, machine_mode mode, int bits) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_shift_cost[bits], + speed, mode); +} + +/* Set the COST of doing a shift in MODE by BITS when optimizing for SPEED. 
*/ + +static inline void +set_shift_cost (bool speed, machine_mode mode, int bits, int cost) +{ + *shift_cost_ptr (speed, mode, bits) = cost; +} + +/* Return the cost of doing a shift in MODE by BITS when optimizing for + SPEED. */ + +static inline int +shift_cost (bool speed, machine_mode mode, int bits) +{ + return *shift_cost_ptr (speed, mode, bits); +} + +/* Subroutine of {set_,}shiftadd_cost. Not to be used otherwise. */ + +static inline int * +shiftadd_cost_ptr (bool speed, machine_mode mode, int bits) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_shiftadd_cost[bits], + speed, mode); +} + +/* Set the COST of doing a shift in MODE by BITS followed by an add when + optimizing for SPEED. */ + +static inline void +set_shiftadd_cost (bool speed, machine_mode mode, int bits, int cost) +{ + *shiftadd_cost_ptr (speed, mode, bits) = cost; +} + +/* Return the cost of doing a shift in MODE by BITS followed by an add + when optimizing for SPEED. */ + +static inline int +shiftadd_cost (bool speed, machine_mode mode, int bits) +{ + return *shiftadd_cost_ptr (speed, mode, bits); +} + +/* Subroutine of {set_,}shiftsub0_cost. Not to be used otherwise. */ + +static inline int * +shiftsub0_cost_ptr (bool speed, machine_mode mode, int bits) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_shiftsub0_cost[bits], + speed, mode); +} + +/* Set the COST of doing a shift in MODE by BITS and then subtracting a + value when optimizing for SPEED. */ + +static inline void +set_shiftsub0_cost (bool speed, machine_mode mode, int bits, int cost) +{ + *shiftsub0_cost_ptr (speed, mode, bits) = cost; +} + +/* Return the cost of doing a shift in MODE by BITS and then subtracting + a value when optimizing for SPEED. */ + +static inline int +shiftsub0_cost (bool speed, machine_mode mode, int bits) +{ + return *shiftsub0_cost_ptr (speed, mode, bits); +} + +/* Subroutine of {set_,}shiftsub1_cost. Not to be used otherwise. 
*/ + +static inline int * +shiftsub1_cost_ptr (bool speed, machine_mode mode, int bits) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_shiftsub1_cost[bits], + speed, mode); +} + +/* Set the COST of subtracting a shift in MODE by BITS from a value when + optimizing for SPEED. */ + +static inline void +set_shiftsub1_cost (bool speed, machine_mode mode, int bits, int cost) +{ + *shiftsub1_cost_ptr (speed, mode, bits) = cost; +} + +/* Return the cost of subtracting a shift in MODE by BITS from a value + when optimizing for SPEED. */ + +static inline int +shiftsub1_cost (bool speed, machine_mode mode, int bits) +{ + return *shiftsub1_cost_ptr (speed, mode, bits); +} + +/* Subroutine of {set_,}mul_cost. Not to be used otherwise. */ + +static inline int * +mul_cost_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_mul_cost, speed, mode); +} + +/* Set the COST of doing a multiplication in MODE when optimizing for + SPEED. */ + +static inline void +set_mul_cost (bool speed, machine_mode mode, int cost) +{ + *mul_cost_ptr (speed, mode) = cost; +} + +/* Return the cost of doing a multiplication in MODE when optimizing + for SPEED. */ + +static inline int +mul_cost (bool speed, machine_mode mode) +{ + return *mul_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}sdiv_cost. Not to be used otherwise. */ + +static inline int * +sdiv_cost_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_sdiv_cost, speed, mode); +} + +/* Set the COST of doing a signed division in MODE when optimizing + for SPEED. */ + +static inline void +set_sdiv_cost (bool speed, machine_mode mode, int cost) +{ + *sdiv_cost_ptr (speed, mode) = cost; +} + +/* Return the cost of doing a signed division in MODE when optimizing + for SPEED. */ + +static inline int +sdiv_cost (bool speed, machine_mode mode) +{ + return *sdiv_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}udiv_cost. Not to be used otherwise. 
*/ + +static inline int * +udiv_cost_ptr (bool speed, machine_mode mode) +{ + return expmed_op_cost_ptr (&this_target_expmed->x_udiv_cost, speed, mode); +} + +/* Set the COST of doing an unsigned division in MODE when optimizing + for SPEED. */ + +static inline void +set_udiv_cost (bool speed, machine_mode mode, int cost) +{ + *udiv_cost_ptr (speed, mode) = cost; +} + +/* Return the cost of doing an unsigned division in MODE when + optimizing for SPEED. */ + +static inline int +udiv_cost (bool speed, machine_mode mode) +{ + return *udiv_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}mul_widen_cost. Not to be used otherwise. */ + +static inline int * +mul_widen_cost_ptr (bool speed, machine_mode mode) +{ + gcc_assert (GET_MODE_CLASS (mode) == MODE_INT); + + return &this_target_expmed->x_mul_widen_cost[speed][mode - MIN_MODE_INT]; +} + +/* Set the COST for computing a widening multiplication in MODE when + optimizing for SPEED. */ + +static inline void +set_mul_widen_cost (bool speed, machine_mode mode, int cost) +{ + *mul_widen_cost_ptr (speed, mode) = cost; +} + +/* Return the cost for computing a widening multiplication in MODE when + optimizing for SPEED. */ + +static inline int +mul_widen_cost (bool speed, machine_mode mode) +{ + return *mul_widen_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}mul_highpart_cost. Not to be used otherwise. */ + +static inline int * +mul_highpart_cost_ptr (bool speed, machine_mode mode) +{ + gcc_assert (GET_MODE_CLASS (mode) == MODE_INT); + int m = mode - MIN_MODE_INT; + gcc_assert (m < NUM_MODE_INT); + + return &this_target_expmed->x_mul_highpart_cost[speed][m]; +} + +/* Set the COST for computing the high part of a multiplication in MODE + when optimizing for SPEED. */ + +static inline void +set_mul_highpart_cost (bool speed, machine_mode mode, int cost) +{ + *mul_highpart_cost_ptr (speed, mode) = cost; +} + +/* Return the cost for computing the high part of a multiplication in MODE + when optimizing for SPEED. 
*/ + +static inline int +mul_highpart_cost (bool speed, machine_mode mode) +{ + return *mul_highpart_cost_ptr (speed, mode); +} + +/* Subroutine of {set_,}convert_cost. Not to be used otherwise. */ + +static inline int * +convert_cost_ptr (machine_mode to_mode, machine_mode from_mode, + bool speed) +{ + int to_idx = expmed_mode_index (to_mode); + int from_idx = expmed_mode_index (from_mode); + + gcc_assert (IN_RANGE (to_idx, 0, NUM_MODE_IP_INT - 1)); + gcc_assert (IN_RANGE (from_idx, 0, NUM_MODE_IP_INT - 1)); + + return &this_target_expmed->x_convert_cost[speed][to_idx][from_idx]; +} + +/* Set the COST for converting from FROM_MODE to TO_MODE when optimizing + for SPEED. */ + +static inline void +set_convert_cost (machine_mode to_mode, machine_mode from_mode, + bool speed, int cost) +{ + *convert_cost_ptr (to_mode, from_mode, speed) = cost; +} + +/* Return the cost for converting from FROM_MODE to TO_MODE when optimizing + for SPEED. */ + +static inline int +convert_cost (machine_mode to_mode, machine_mode from_mode, + bool speed) +{ + return *convert_cost_ptr (to_mode, from_mode, speed); +} + +extern int mult_by_coeff_cost (HOST_WIDE_INT, machine_mode, bool); +extern rtx emit_cstore (rtx target, enum insn_code icode, enum rtx_code code, + machine_mode mode, machine_mode compare_mode, + int unsignedp, rtx x, rtx y, int normalizep, + machine_mode target_mode); + +/* Arguments MODE, RTX: return an rtx for the negation of that value. + May emit insns. */ +extern rtx negate_rtx (machine_mode, rtx); + +/* Arguments MODE, RTX: return an rtx for the flipping of that value. + May emit insns. */ +extern rtx flip_storage_order (machine_mode, rtx); + +/* Expand a logical AND operation. */ +extern rtx expand_and (machine_mode, rtx, rtx, rtx); + +/* Emit a store-flag operation. */ +extern rtx emit_store_flag (rtx, enum rtx_code, rtx, rtx, machine_mode, + int, int); + +/* Like emit_store_flag, but always succeeds. 
*/ +extern rtx emit_store_flag_force (rtx, enum rtx_code, rtx, rtx, + machine_mode, int, int); + +extern void canonicalize_comparison (machine_mode, enum rtx_code *, rtx *); + +/* Choose a minimal N + 1 bit approximation to 1/D that can be used to + replace division by D, and put the least significant N bits of the result + in *MULTIPLIER_PTR and return the most significant bit. */ +extern unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int, + int, unsigned HOST_WIDE_INT *, + int *, int *); + +#ifdef TREE_CODE +extern rtx expand_variable_shift (enum tree_code, machine_mode, + rtx, tree, rtx, int); +extern rtx expand_shift (enum tree_code, machine_mode, rtx, poly_int64, rtx, + int); +#ifdef GCC_OPTABS_H +extern rtx expand_divmod (int, enum tree_code, machine_mode, rtx, rtx, + rtx, int, enum optab_methods = OPTAB_LIB_WIDEN); +#endif +#endif + +extern void store_bit_field (rtx, poly_uint64, poly_uint64, + poly_uint64, poly_uint64, + machine_mode, rtx, bool); +extern rtx extract_bit_field (rtx, poly_uint64, poly_uint64, int, rtx, + machine_mode, machine_mode, bool, rtx *); +extern rtx extract_low_bits (machine_mode, machine_mode, rtx); +extern rtx expand_mult (machine_mode, rtx, rtx, rtx, int, bool = false); +extern rtx expand_mult_highpart_adjust (scalar_int_mode, rtx, rtx, rtx, + rtx, int); + +#endif // EXPMED_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/expr.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/expr.h new file mode 100644 index 0000000..7e5cf49 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/expr.h @@ -0,0 +1,352 @@ +/* Definitions for code generation pass of GNU compiler. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_EXPR_H +#define GCC_EXPR_H + +/* This is the 4th arg to `expand_expr'. + EXPAND_STACK_PARM means we are possibly expanding a call param onto + the stack. + EXPAND_SUM means it is ok to return a PLUS rtx or MULT rtx. + EXPAND_INITIALIZER is similar but also record any labels on forced_labels. + EXPAND_CONST_ADDRESS means it is ok to return a MEM whose address + is a constant that is not a legitimate address. + EXPAND_WRITE means we are only going to write to the resulting rtx. + EXPAND_MEMORY means we are interested in a memory result, even if + the memory is constant and we could have propagated a constant value, + or the memory is unaligned on a STRICT_ALIGNMENT target. */ +enum expand_modifier {EXPAND_NORMAL = 0, EXPAND_STACK_PARM, EXPAND_SUM, + EXPAND_CONST_ADDRESS, EXPAND_INITIALIZER, EXPAND_WRITE, + EXPAND_MEMORY}; + +/* Prevent the compiler from deferring stack pops. See + inhibit_defer_pop for more information. */ +#define NO_DEFER_POP (inhibit_defer_pop += 1) + +/* Allow the compiler to defer stack pops. See inhibit_defer_pop for + more information. */ +#define OK_DEFER_POP (inhibit_defer_pop -= 1) + +/* This structure is used to pass around information about exploded + unary, binary and trinary expressions between expand_expr_real_1 and + friends. */ +typedef struct separate_ops +{ + enum tree_code code; + location_t location; + tree type; + tree op0, op1, op2; +} *sepops; + +/* This is run during target initialization to set up which modes can be + used directly in memory and to initialize the block move optab. 
*/ +extern void init_expr_target (void); + +/* This is run at the start of compiling a function. */ +extern void init_expr (void); + +/* Emit some rtl insns to move data between rtx's, converting machine modes. + Both modes must be floating or both fixed. */ +extern void convert_move (rtx, rtx, int); + +/* Convert an rtx to specified machine mode and return the result. */ +extern rtx convert_to_mode (machine_mode, rtx, int); + +/* Convert an rtx to MODE from OLDMODE and return the result. */ +extern rtx convert_modes (machine_mode, machine_mode, rtx, int); + +/* Expand a call to memcpy or memmove or memcmp, and return the result. */ +extern rtx emit_block_op_via_libcall (enum built_in_function, rtx, rtx, rtx, + bool); + +static inline rtx +emit_block_copy_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false) +{ + return emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src, size, tailcall); +} + +static inline rtx +emit_block_move_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false) +{ + return emit_block_op_via_libcall (BUILT_IN_MEMMOVE, dst, src, size, tailcall); +} + +static inline rtx +emit_block_comp_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false) +{ + return emit_block_op_via_libcall (BUILT_IN_MEMCMP, dst, src, size, tailcall); +} + +/* Emit code to move a block Y to a block X. */ +enum block_op_methods +{ + BLOCK_OP_NORMAL, + BLOCK_OP_NO_LIBCALL, + BLOCK_OP_CALL_PARM, + /* Like BLOCK_OP_NORMAL, but the libcall can be tail call optimized. */ + BLOCK_OP_TAILCALL, + /* Like BLOCK_OP_NO_LIBCALL, but instead of emitting a libcall return + pc_rtx to indicate nothing has been emitted and let the caller handle + it. */ + BLOCK_OP_NO_LIBCALL_RET +}; + +typedef rtx (*by_pieces_constfn) (void *, void *, HOST_WIDE_INT, + fixed_size_mode); + +/* The second pointer passed to by_pieces_constfn. 
*/ +struct by_pieces_prev +{ + rtx data; + fixed_size_mode mode; +}; + +extern rtx emit_block_move (rtx, rtx, rtx, enum block_op_methods); +extern rtx emit_block_move_hints (rtx, rtx, rtx, enum block_op_methods, + unsigned int, HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + bool bail_out_libcall = false, + bool *is_move_done = NULL, + bool might_overlap = false); +extern rtx emit_block_cmp_hints (rtx, rtx, rtx, tree, rtx, bool, + by_pieces_constfn, void *); +extern bool emit_storent_insn (rtx to, rtx from); + +/* Copy all or part of a value X into registers starting at REGNO. + The number of registers to be filled is NREGS. */ +extern void move_block_to_reg (int, rtx, int, machine_mode); + +/* Copy all or part of a BLKmode value X out of registers starting at REGNO. + The number of registers to be filled is NREGS. */ +extern void move_block_from_reg (int, rtx, int); + +/* Generate a non-consecutive group of registers represented by a PARALLEL. */ +extern rtx gen_group_rtx (rtx); + +/* Load a BLKmode value into non-consecutive registers represented by a + PARALLEL. */ +extern void emit_group_load (rtx, rtx, tree, poly_int64); + +/* Similarly, but load into new temporaries. */ +extern rtx emit_group_load_into_temps (rtx, rtx, tree, poly_int64); + +/* Move a non-consecutive group of registers represented by a PARALLEL into + a non-consecutive group of registers represented by a PARALLEL. */ +extern void emit_group_move (rtx, rtx); + +/* Move a group of registers represented by a PARALLEL into pseudos. */ +extern rtx emit_group_move_into_temps (rtx); + +/* Store a BLKmode value from non-consecutive registers represented by a + PARALLEL. */ +extern void emit_group_store (rtx, rtx, tree, poly_int64); + +extern rtx maybe_emit_group_store (rtx, tree); + +/* Mark REG as holding a parameter for the next CALL_INSN. + Mode is TYPE_MODE of the non-promoted parameter, or VOIDmode. 
*/ +extern void use_reg_mode (rtx *, rtx, machine_mode); +extern void clobber_reg_mode (rtx *, rtx, machine_mode); + +extern rtx copy_blkmode_to_reg (machine_mode, tree); + +/* Mark REG as holding a parameter for the next CALL_INSN. */ +static inline void +use_reg (rtx *fusage, rtx reg) +{ + use_reg_mode (fusage, reg, VOIDmode); +} + +/* Mark REG as clobbered by the call with FUSAGE as CALL_INSN_FUNCTION_USAGE. */ +static inline void +clobber_reg (rtx *fusage, rtx reg) +{ + clobber_reg_mode (fusage, reg, VOIDmode); +} + +/* Mark NREGS consecutive regs, starting at REGNO, as holding parameters + for the next CALL_INSN. */ +extern void use_regs (rtx *, int, int); + +/* Mark a PARALLEL as holding a parameter for the next CALL_INSN. */ +extern void use_group_regs (rtx *, rtx); + +#ifdef GCC_INSN_CODES_H +extern rtx expand_cmpstrn_or_cmpmem (insn_code, rtx, rtx, rtx, tree, rtx, + HOST_WIDE_INT); +#endif + +/* Write zeros through the storage of OBJECT. + If OBJECT has BLKmode, SIZE is its length in bytes. */ +extern rtx clear_storage (rtx, rtx, enum block_op_methods); +extern rtx clear_storage_hints (rtx, rtx, enum block_op_methods, + unsigned int, HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned); +/* The same, but always output an library call. */ +extern rtx set_storage_via_libcall (rtx, rtx, rtx, bool = false); + +/* Expand a setmem pattern; return true if successful. */ +extern bool set_storage_via_setmem (rtx, rtx, rtx, unsigned int, + unsigned int, HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned HOST_WIDE_INT, + unsigned HOST_WIDE_INT); + +/* Return nonzero if it is desirable to store LEN bytes generated by + CONSTFUN with several move instructions by store_by_pieces + function. CONSTFUNDATA is a pointer which will be passed as argument + in every CONSTFUN call. + ALIGN is maximum alignment we can assume. + MEMSETP is true if this is a real memset/bzero, not a copy + of a const string. 
*/ +extern int can_store_by_pieces (unsigned HOST_WIDE_INT, + by_pieces_constfn, + void *, unsigned int, bool); + +/* Generate several move instructions to store LEN bytes generated by + CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a + pointer which will be passed as argument in every CONSTFUN call. + ALIGN is maximum alignment we can assume. + MEMSETP is true if this is a real memset/bzero, not a copy. + Returns TO + LEN. */ +extern rtx store_by_pieces (rtx, unsigned HOST_WIDE_INT, by_pieces_constfn, + void *, unsigned int, bool, memop_ret); + +/* If can_store_by_pieces passes for worst-case values near MAX_LEN, call + store_by_pieces within conditionals so as to handle variable LEN efficiently, + storing VAL, if non-NULL_RTX, or valc instead. */ +extern bool try_store_by_multiple_pieces (rtx to, rtx len, + unsigned int ctz_len, + unsigned HOST_WIDE_INT min_len, + unsigned HOST_WIDE_INT max_len, + rtx val, char valc, + unsigned int align); + +/* Emit insns to set X from Y. */ +extern rtx_insn *emit_move_insn (rtx, rtx); +extern rtx_insn *gen_move_insn (rtx, rtx); + +/* Emit insns to set X from Y, with no frills. */ +extern rtx_insn *emit_move_insn_1 (rtx, rtx); + +extern rtx_insn *emit_move_complex_push (machine_mode, rtx, rtx); +extern rtx_insn *emit_move_complex_parts (rtx, rtx); +extern rtx read_complex_part (rtx, bool); +extern void write_complex_part (rtx, rtx, bool); +extern rtx read_complex_part (rtx, bool); +extern rtx emit_move_resolve_push (machine_mode, rtx); + +/* Push a block of length SIZE (perhaps variable) + and return an rtx to address the beginning of the block. */ +extern rtx push_block (rtx, poly_int64, int); + +/* Generate code to push something onto the stack, given its mode and type. */ +extern bool emit_push_insn (rtx, machine_mode, tree, rtx, unsigned int, + int, rtx, poly_int64, rtx, rtx, int, rtx, bool); + +/* Extract the accessible bit-range from a COMPONENT_REF. 
*/ +extern void get_bit_range (poly_uint64_pod *, poly_uint64_pod *, tree, + poly_int64_pod *, tree *); + +/* Expand an assignment that stores the value of FROM into TO. */ +extern void expand_assignment (tree, tree, bool); + +/* Generate code for computing expression EXP, + and storing the value into TARGET. + If SUGGEST_REG is nonzero, copy the value through a register + and return that register, if that is possible. */ +extern rtx store_expr (tree, rtx, int, bool, bool); + +/* Given an rtx that may include add and multiply operations, + generate them as insns and return a pseudo-reg containing the value. + Useful after calling expand_expr with 1 as sum_ok. */ +extern rtx force_operand (rtx, rtx); + +/* Work horses for expand_expr. */ +extern rtx expand_expr_real (tree, rtx, machine_mode, + enum expand_modifier, rtx *, bool); +extern rtx expand_expr_real_1 (tree, rtx, machine_mode, + enum expand_modifier, rtx *, bool); +extern rtx expand_expr_real_2 (sepops, rtx, machine_mode, + enum expand_modifier); + +/* Generate code for computing expression EXP. + An rtx for the computed value is returned. The value is never null. + In the case of a void EXP, const0_rtx is returned. */ +static inline rtx +expand_expr (tree exp, rtx target, machine_mode mode, + enum expand_modifier modifier) +{ + return expand_expr_real (exp, target, mode, modifier, NULL, false); +} + +static inline rtx +expand_normal (tree exp) +{ + return expand_expr_real (exp, NULL_RTX, VOIDmode, EXPAND_NORMAL, NULL, false); +} + + +/* Return STRING_CST and set offset, size and decl, if the first + argument corresponds to a string constant. */ +extern tree string_constant (tree, tree *, tree *, tree *); +/* Similar to string_constant, return a STRING_CST corresponding + to the value representation of the first argument if it's + a constant. 
*/ +extern tree byte_representation (tree, tree *, tree *, tree *); + +extern enum tree_code maybe_optimize_mod_cmp (enum tree_code, tree *, tree *); +extern void maybe_optimize_sub_cmp_0 (enum tree_code, tree *, tree *); + +/* Two different ways of generating switch statements. */ +extern int try_casesi (tree, tree, tree, tree, rtx, rtx, rtx, profile_probability); +extern int try_tablejump (tree, tree, tree, tree, rtx, rtx, profile_probability); + +extern int safe_from_p (const_rtx, tree, int); + +/* Get the personality libfunc for a function decl. */ +rtx get_personality_function (tree); + +/* Determine whether the LEN bytes can be moved by using several move + instructions. Return nonzero if a call to move_by_pieces should + succeed. */ +extern bool can_move_by_pieces (unsigned HOST_WIDE_INT, unsigned int); + +extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree); + +extern bool categorize_ctor_elements (const_tree, HOST_WIDE_INT *, + HOST_WIDE_INT *, HOST_WIDE_INT *, + bool *); + +extern void expand_operands (tree, tree, rtx, rtx*, rtx*, + enum expand_modifier); + +/* rtl.h and tree.h were included. */ +/* Return an rtx for the size in bytes of the value of an expr. */ +extern rtx expr_size (tree); + +extern bool mem_ref_refers_to_non_mem_p (tree); +extern bool non_mem_decl_p (tree); + +#endif /* GCC_EXPR_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fibonacci_heap.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fibonacci_heap.h new file mode 100644 index 0000000..e7dfba9 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fibonacci_heap.h @@ -0,0 +1,684 @@ +/* Fibonacci heap for GNU compiler. + Copyright (C) 1998-2022 Free Software Foundation, Inc. + Contributed by Daniel Berlin (dan@cgsoftware.com). + Re-implemented in C++ by Martin Liska + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Fibonacci heaps are somewhat complex, but, there's an article in + DDJ that explains them pretty well: + + http://www.ddj.com/articles/1997/9701/9701o/9701o.htm?topic=algoritms + + Introduction to algorithms by Corman and Rivest also goes over them. + + The original paper that introduced them is "Fibonacci heaps and their + uses in improved network optimization algorithms" by Tarjan and + Fredman (JACM 34(3), July 1987). + + Amortized and real worst case time for operations: + + ExtractMin: O(lg n) amortized. O(n) worst case. + DecreaseKey: O(1) amortized. O(lg n) worst case. + Insert: O(1) amortized. + Union: O(1) amortized. */ + +#ifndef GCC_FIBONACCI_HEAP_H +#define GCC_FIBONACCI_HEAP_H + +/* Forward definition. */ + +template +class fibonacci_heap; + +/* Fibonacci heap node class. */ + +template +class fibonacci_node +{ + typedef fibonacci_node fibonacci_node_t; + friend class fibonacci_heap; + +public: + /* Default constructor. */ + fibonacci_node (): m_parent (NULL), m_child (NULL), m_left (this), + m_right (this), m_data (NULL), m_degree (0), m_mark (0) + { + } + + /* Constructor for a node with given KEY. */ + fibonacci_node (K key, V *data = NULL): m_parent (NULL), m_child (NULL), + m_left (this), m_right (this), m_key (key), m_data (data), + m_degree (0), m_mark (0) + { + } + + /* Compare fibonacci node with OTHER node. 
*/ + int compare (fibonacci_node_t *other) + { + if (m_key < other->m_key) + return -1; + if (m_key > other->m_key) + return 1; + return 0; + } + + /* Compare the node with a given KEY. */ + int compare_data (K key) + { + return fibonacci_node_t (key).compare (this); + } + + /* Remove fibonacci heap node. */ + fibonacci_node_t *remove (); + + /* Link the node with PARENT. */ + void link (fibonacci_node_t *parent); + + /* Return key associated with the node. */ + K get_key () + { + return m_key; + } + + /* Return data associated with the node. */ + V *get_data () + { + return m_data; + } + +private: + /* Put node B after this node. */ + void insert_after (fibonacci_node_t *b); + + /* Insert fibonacci node B after this node. */ + void insert_before (fibonacci_node_t *b) + { + m_left->insert_after (b); + } + + /* Parent node. */ + fibonacci_node *m_parent; + /* Child node. */ + fibonacci_node *m_child; + /* Left sibling. */ + fibonacci_node *m_left; + /* Right node. */ + fibonacci_node *m_right; + /* Key associated with node. */ + K m_key; + /* Data associated with node. */ + V *m_data; + +#if defined (__GNUC__) && (!defined (SIZEOF_INT) || SIZEOF_INT < 4) + /* Degree of the node. */ + __extension__ unsigned long int m_degree : 31; + /* Mark of the node. */ + __extension__ unsigned long int m_mark : 1; +#else + /* Degree of the node. */ + unsigned int m_degree : 31; + /* Mark of the node. */ + unsigned int m_mark : 1; +#endif +}; + +/* Fibonacci heap class. */ +template +class fibonacci_heap +{ + typedef fibonacci_node fibonacci_node_t; + friend class fibonacci_node; + +public: + /* Default constructor. ALLOCATOR is optional and is primarily useful + when heaps are going to be merged (in that case they need to be allocated + in same alloc pool). 
*/ + fibonacci_heap (K global_min_key, pool_allocator *allocator = NULL): + m_nodes (0), m_min (NULL), m_root (NULL), + m_global_min_key (global_min_key), + m_allocator (allocator), m_own_allocator (false) + { + if (!m_allocator) + { + m_allocator = new pool_allocator ("Fibonacci heap", + sizeof (fibonacci_node_t)); + m_own_allocator = true; + } + } + + /* Destructor. */ + ~fibonacci_heap () + { + /* Actual memory will be released by the destructor of m_allocator. */ + if (need_finalization_p () || !m_own_allocator) + while (m_min != NULL) + { + fibonacci_node_t *n = extract_minimum_node (); + n->~fibonacci_node_t (); + if (!m_own_allocator) + m_allocator->remove (n); + } + if (m_own_allocator) + delete m_allocator; + } + + /* Insert new node given by KEY and DATA associated with the key. */ + fibonacci_node_t *insert (K key, V *data); + + /* Return true if no entry is present. */ + bool empty () const + { + return m_nodes == 0; + } + + /* Return the number of nodes. */ + size_t nodes () const + { + return m_nodes; + } + + /* Return minimal key presented in the heap. */ + K min_key () const + { + if (m_min == NULL) + gcc_unreachable (); + + return m_min->m_key; + } + + /* For given NODE, set new KEY value. */ + K replace_key (fibonacci_node_t *node, K key) + { + K okey = node->m_key; + + replace_key_data (node, key, node->m_data); + return okey; + } + + /* For given NODE, decrease value to new KEY. */ + K decrease_key (fibonacci_node_t *node, K key) + { + gcc_assert (key <= node->m_key); + return replace_key (node, key); + } + + /* For given NODE, set new KEY and DATA value. */ + V *replace_key_data (fibonacci_node_t *node, K key, V *data); + + /* Extract minimum node in the heap. If RELEASE is specified, + memory is released. */ + V *extract_min (bool release = true); + + /* Return value associated with minimum node in the heap. 
*/ + V *min () const + { + if (m_min == NULL) + return NULL; + + return m_min->m_data; + } + + /* Replace data associated with NODE and replace it with DATA. */ + V *replace_data (fibonacci_node_t *node, V *data) + { + return replace_key_data (node, node->m_key, data); + } + + /* Delete NODE in the heap. */ + V *delete_node (fibonacci_node_t *node, bool release = true); + + /* Union the heap with HEAPB. */ + fibonacci_heap *union_with (fibonacci_heap *heapb); + +private: + /* Insert new NODE given by KEY and DATA associated with the key. */ + fibonacci_node_t *insert (fibonacci_node_t *node, K key, V *data); + + /* Insert new NODE that has already filled key and value. */ + fibonacci_node_t *insert_node (fibonacci_node_t *node); + + /* Insert it into the root list. */ + void insert_root (fibonacci_node_t *node); + + /* Remove NODE from PARENT's child list. */ + void cut (fibonacci_node_t *node, fibonacci_node_t *parent); + + /* Process cut of node Y and do it recursivelly. */ + void cascading_cut (fibonacci_node_t *y); + + /* Extract minimum node from the heap. */ + fibonacci_node_t * extract_minimum_node (); + + /* Remove root NODE from the heap. */ + void remove_root (fibonacci_node_t *node); + + /* Consolidate heap. */ + void consolidate (); + + /* Number of nodes. */ + size_t m_nodes; + /* Minimum node of the heap. */ + fibonacci_node_t *m_min; + /* Root node of the heap. */ + fibonacci_node_t *m_root; + /* Global minimum given in the heap construction. */ + K m_global_min_key; + + /* Allocator used to hold nodes. */ + pool_allocator *m_allocator; + /* True if alocator is owned by the current heap only. */ + bool m_own_allocator; +}; + +/* Remove fibonacci heap node. 
*/ + +template +fibonacci_node * +fibonacci_node::remove () +{ + fibonacci_node *ret; + + if (this == m_left) + ret = NULL; + else + ret = m_left; + + if (m_parent != NULL && m_parent->m_child == this) + m_parent->m_child = ret; + + m_right->m_left = m_left; + m_left->m_right = m_right; + + m_parent = NULL; + m_left = this; + m_right = this; + + return ret; +} + +/* Link the node with PARENT. */ + +template +void +fibonacci_node::link (fibonacci_node *parent) +{ + if (parent->m_child == NULL) + parent->m_child = this; + else + parent->m_child->insert_before (this); + m_parent = parent; + parent->m_degree++; + m_mark = 0; +} + +/* Put node B after this node. */ + +template +void +fibonacci_node::insert_after (fibonacci_node *b) +{ + fibonacci_node *a = this; + + if (a == a->m_right) + { + a->m_right = b; + a->m_left = b; + b->m_right = a; + b->m_left = a; + } + else + { + b->m_right = a->m_right; + a->m_right->m_left = b; + a->m_right = b; + b->m_left = a; + } +} + +/* Insert new node given by KEY and DATA associated with the key. */ + +template +fibonacci_node* +fibonacci_heap::insert (K key, V *data) +{ + /* Create the new node. */ + fibonacci_node *node = new (m_allocator->allocate ()) + fibonacci_node_t (key, data); + + return insert_node (node); +} + +/* Insert new NODE given by DATA associated with the key. */ + +template +fibonacci_node* +fibonacci_heap::insert (fibonacci_node_t *node, K key, V *data) +{ + /* Set the node's data. */ + node->m_data = data; + node->m_key = key; + + return insert_node (node); +} + +/* Insert new NODE that has already filled key and value. */ + +template +fibonacci_node* +fibonacci_heap::insert_node (fibonacci_node_t *node) +{ + /* Insert it into the root list. */ + insert_root (node); + + /* If their was no minimum, or this key is less than the min, + it's the new min. */ + if (m_min == NULL || node->m_key < m_min->m_key) + m_min = node; + + m_nodes++; + + return node; +} + +/* For given NODE, set new KEY and DATA value. 
*/ + +template +V* +fibonacci_heap::replace_key_data (fibonacci_node *node, K key, + V *data) +{ + K okey; + fibonacci_node *y; + V *odata = node->m_data; + + /* If we wanted to, we do a real increase by redeleting and + inserting. */ + if (node->compare_data (key) > 0) + { + delete_node (node, false); + + node = new (node) fibonacci_node_t (); + insert (node, key, data); + + return odata; + } + + okey = node->m_key; + node->m_data = data; + node->m_key = key; + y = node->m_parent; + + /* Short-circuit if the key is the same, as we then don't have to + do anything. Except if we're trying to force the new node to + be the new minimum for delete. */ + if (okey == key && okey != m_global_min_key) + return odata; + + /* These two compares are specifically <= 0 to make sure that in the case + of equality, a node we replaced the data on, becomes the new min. This + is needed so that delete's call to extractmin gets the right node. */ + if (y != NULL && node->compare (y) <= 0) + { + cut (node, y); + cascading_cut (y); + } + + if (node->compare (m_min) <= 0) + m_min = node; + + return odata; +} + +/* Extract minimum node in the heap. Delete fibonacci node if RELEASE + is true. */ + +template +V* +fibonacci_heap::extract_min (bool release) +{ + fibonacci_node *z; + V *ret = NULL; + + /* If we don't have a min set, it means we have no nodes. */ + if (m_min != NULL) + { + /* Otherwise, extract the min node, free the node, and return the + node's data. */ + z = extract_minimum_node (); + ret = z->m_data; + + if (release) + { + z->~fibonacci_node_t (); + m_allocator->remove (z); + } + } + + return ret; +} + +/* Delete NODE in the heap, if RELEASE is specified memory is released. */ + +template +V* +fibonacci_heap::delete_node (fibonacci_node *node, bool release) +{ + V *ret = node->m_data; + + /* To perform delete, we just make it the min key, and extract. 
*/ + replace_key (node, m_global_min_key); + if (node != m_min) + { + fprintf (stderr, "Can't force minimum on fibheap.\n"); + abort (); + } + extract_min (release); + + return ret; +} + +/* Union the heap with HEAPB. One of the heaps is going to be deleted. */ + +template +fibonacci_heap* +fibonacci_heap::union_with (fibonacci_heap *heapb) +{ + fibonacci_heap *heapa = this; + + fibonacci_node *a_root, *b_root; + + /* Both heaps must share allocator. */ + gcc_checking_assert (m_allocator == heapb->m_allocator); + + /* If one of the heaps is empty, the union is just the other heap. */ + if ((a_root = heapa->m_root) == NULL) + { + delete (heapa); + return heapb; + } + if ((b_root = heapb->m_root) == NULL) + { + delete (heapb); + return heapa; + } + + /* Merge them to the next nodes on the opposite chain. */ + a_root->m_left->m_right = b_root; + b_root->m_left->m_right = a_root; + std::swap (a_root->m_left, b_root->m_left); + heapa->m_nodes += heapb->m_nodes; + + /* And set the new minimum, if it's changed. */ + if (heapb->m_min->compare (heapa->m_min) < 0) + heapa->m_min = heapb->m_min; + + /* Set m_min to NULL to not to delete live fibonacci nodes. */ + heapb->m_min = NULL; + delete (heapb); + + return heapa; +} + +/* Insert it into the root list. */ + +template +void +fibonacci_heap::insert_root (fibonacci_node_t *node) +{ + /* If the heap is currently empty, the new node becomes the singleton + circular root list. */ + if (m_root == NULL) + { + m_root = node; + node->m_left = node; + node->m_right = node; + return; + } + + /* Otherwise, insert it in the circular root list between the root + and it's right node. */ + m_root->insert_after (node); +} + +/* Remove NODE from PARENT's child list. */ + +template +void +fibonacci_heap::cut (fibonacci_node *node, + fibonacci_node *parent) +{ + node->remove (); + parent->m_degree--; + insert_root (node); + node->m_parent = NULL; + node->m_mark = 0; +} + +/* Process cut of node Y and do it recursivelly. 
*/ + +template +void +fibonacci_heap::cascading_cut (fibonacci_node *y) +{ + fibonacci_node *z; + + while ((z = y->m_parent) != NULL) + { + if (y->m_mark == 0) + { + y->m_mark = 1; + return; + } + else + { + cut (y, z); + y = z; + } + } +} + +/* Extract minimum node from the heap. */ + +template +fibonacci_node* +fibonacci_heap::extract_minimum_node () +{ + fibonacci_node *ret = m_min; + fibonacci_node *x, *y, *orig; + + /* Attach the child list of the minimum node to the root list of the heap. + If there is no child list, we don't do squat. */ + for (x = ret->m_child, orig = NULL; x != orig && x != NULL; x = y) + { + if (orig == NULL) + orig = x; + y = x->m_right; + x->m_parent = NULL; + insert_root (x); + } + + /* Remove the old root. */ + remove_root (ret); + m_nodes--; + + /* If we are left with no nodes, then the min is NULL. */ + if (m_nodes == 0) + m_min = NULL; + else + { + /* Otherwise, consolidate to find new minimum, as well as do the reorg + work that needs to be done. */ + m_min = ret->m_right; + consolidate (); + } + + return ret; +} + +/* Remove root NODE from the heap. */ + +template +void +fibonacci_heap::remove_root (fibonacci_node *node) +{ + if (node->m_left == node) + m_root = NULL; + else + m_root = node->remove (); +} + +/* Consolidate heap. 
*/ + +template +void fibonacci_heap::consolidate () +{ + const int D = 1 + 8 * sizeof (long); + fibonacci_node *a[D]; + fibonacci_node *w, *x, *y; + int i, d; + + memset (a, 0, sizeof (a)); + + while ((w = m_root) != NULL) + { + x = w; + remove_root (w); + d = x->m_degree; + gcc_checking_assert (d < D); + while (a[d] != NULL) + { + y = a[d]; + if (x->compare (y) > 0) + std::swap (x, y); + y->link (x); + a[d] = NULL; + d++; + } + a[d] = x; + } + m_min = NULL; + for (i = 0; i < D; i++) + if (a[i] != NULL) + { + insert_root (a[i]); + if (m_min == NULL || a[i]->compare (m_min) < 0) + m_min = a[i]; + } +} + +#endif // GCC_FIBONACCI_HEAP_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/file-find.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/file-find.h new file mode 100644 index 0000000..54a276c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/file-find.h @@ -0,0 +1,47 @@ +/* Prototypes and data structures used for implementing functions for + finding files relative to GCC binaries. + Copyright (C) 1992-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FILE_FIND_H +#define GCC_FILE_FIND_H + +/* Structure to hold all the directories in which to search for files to + execute. */ + +struct prefix_list +{ + const char *prefix; /* String to prepend to the path. */ + struct prefix_list *next; /* Next in linked list. 
*/ +}; + +struct path_prefix +{ + struct prefix_list *plist; /* List of prefixes to try */ + int max_len; /* Max length of a prefix in PLIST */ + const char *name; /* Name of this list (used in config stuff) */ +}; + +extern void find_file_set_debug (bool); +extern char *find_a_file (struct path_prefix *, const char *, int); +extern void add_prefix (struct path_prefix *, const char *); +extern void add_prefix_begin (struct path_prefix *, const char *); +extern void prefix_from_env (const char *, struct path_prefix *); +extern void prefix_from_string (const char *, struct path_prefix *); + +#endif /* GCC_FILE_FIND_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/file-prefix-map.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/file-prefix-map.h new file mode 100644 index 0000000..d975840 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/file-prefix-map.h @@ -0,0 +1,30 @@ +/* Declarations for file prefix remapping support (-f*-prefix-map options). + Copyright (C) 2017-2022 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING3. If not see + . 
*/ + +#ifndef GCC_FILE_PREFIX_MAP_H +#define GCC_FILE_PREFIX_MAP_H + +void add_macro_prefix_map (const char *); +void add_debug_prefix_map (const char *); +void add_file_prefix_map (const char *); +void add_profile_prefix_map (const char *); + +const char *remap_macro_filename (const char *); +const char *remap_debug_filename (const char *); +const char *remap_profile_filename (const char *); + +#endif /* !GCC_FILE_PREFIX_MAP_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/filenames.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/filenames.h new file mode 100644 index 0000000..6c72c42 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/filenames.h @@ -0,0 +1,100 @@ +/* Macros for taking apart, interpreting and processing file names. + + These are here because some non-Posix (a.k.a. DOSish) systems have + drive letter brain-damage at the beginning of an absolute file name, + use forward- and back-slash in path names interchangeably, and + some of them have case-insensitive file names. + + Copyright (C) 2000-2022 Free Software Foundation, Inc. + +This file is part of BFD, the Binary File Descriptor library. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. 
*/ + +#ifndef FILENAMES_H +#define FILENAMES_H + +#include "hashtab.h" /* for hashval_t */ + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__MSDOS__) || (defined(_WIN32) && ! defined(__CYGWIN__)) || \ + defined(__OS2__) +# ifndef HAVE_DOS_BASED_FILE_SYSTEM +# define HAVE_DOS_BASED_FILE_SYSTEM 1 +# endif +# ifndef HAVE_CASE_INSENSITIVE_FILE_SYSTEM +# define HAVE_CASE_INSENSITIVE_FILE_SYSTEM 1 +# endif +# define HAS_DRIVE_SPEC(f) HAS_DOS_DRIVE_SPEC (f) +# define IS_DIR_SEPARATOR(c) IS_DOS_DIR_SEPARATOR (c) +# define IS_ABSOLUTE_PATH(f) IS_DOS_ABSOLUTE_PATH (f) +#else /* not DOSish */ +# if defined(__APPLE__) +# ifndef HAVE_CASE_INSENSITIVE_FILE_SYSTEM +# define HAVE_CASE_INSENSITIVE_FILE_SYSTEM 1 +# endif +# endif /* __APPLE__ */ +# define HAS_DRIVE_SPEC(f) (0) +# define IS_DIR_SEPARATOR(c) IS_UNIX_DIR_SEPARATOR (c) +# define IS_ABSOLUTE_PATH(f) IS_UNIX_ABSOLUTE_PATH (f) +#endif + +#define IS_DIR_SEPARATOR_1(dos_based, c) \ + (((c) == '/') \ + || (((c) == '\\') && (dos_based))) + +#define HAS_DRIVE_SPEC_1(dos_based, f) \ + ((f)[0] && ((f)[1] == ':') && (dos_based)) + +/* Remove the drive spec from F, assuming HAS_DRIVE_SPEC (f). + The result is a pointer to the remainder of F. */ +#define STRIP_DRIVE_SPEC(f) ((f) + 2) + +#define IS_DOS_DIR_SEPARATOR(c) IS_DIR_SEPARATOR_1 (1, c) +#define IS_DOS_ABSOLUTE_PATH(f) IS_ABSOLUTE_PATH_1 (1, f) +#define HAS_DOS_DRIVE_SPEC(f) HAS_DRIVE_SPEC_1 (1, f) + +#define IS_UNIX_DIR_SEPARATOR(c) IS_DIR_SEPARATOR_1 (0, c) +#define IS_UNIX_ABSOLUTE_PATH(f) IS_ABSOLUTE_PATH_1 (0, f) + +/* Note that when DOS_BASED is true, IS_ABSOLUTE_PATH accepts d:foo as + well, although it is only semi-absolute. This is because the users + of IS_ABSOLUTE_PATH want to know whether to prepend the current + working directory to a file name, which should not be done with a + name like d:foo. 
*/ +#define IS_ABSOLUTE_PATH_1(dos_based, f) \ + (IS_DIR_SEPARATOR_1 (dos_based, (f)[0]) \ + || HAS_DRIVE_SPEC_1 (dos_based, f)) + +extern int filename_cmp (const char *s1, const char *s2); +#define FILENAME_CMP(s1, s2) filename_cmp(s1, s2) + +extern int filename_ncmp (const char *s1, const char *s2, + size_t n); + +extern hashval_t filename_hash (const void *s); + +extern int filename_eq (const void *s1, const void *s2); + +extern int canonical_filename_eq (const char *a, const char *b); + +#ifdef __cplusplus +} +#endif + +#endif /* FILENAMES_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fixed-value.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fixed-value.h new file mode 100644 index 0000000..722b26c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fixed-value.h @@ -0,0 +1,111 @@ +/* Fixed-point arithmetic support. + Copyright (C) 2006-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FIXED_VALUE_H +#define GCC_FIXED_VALUE_H + +struct GTY(()) fixed_value +{ + double_int data; /* Store data up to 2 wide integers. */ + scalar_mode_pod mode; /* Use machine mode to know IBIT and FBIT. */ +}; + +#define FIXED_VALUE_TYPE struct fixed_value + +#define MAX_FCONST0 18 /* For storing 18 fixed-point zeros per + fract, ufract, accum, and uaccum modes . 
*/ +#define MAX_FCONST1 8 /* For storing 8 fixed-point ones per accum + and uaccum modes. */ +/* Constant fixed-point values 0 and 1. */ +extern FIXED_VALUE_TYPE fconst0[MAX_FCONST0]; +extern FIXED_VALUE_TYPE fconst1[MAX_FCONST1]; + +/* Macros to access fconst0 and fconst1 via machine modes. */ +#define FCONST0(mode) fconst0[mode - QQmode] +#define FCONST1(mode) fconst1[mode - HAmode] + +/* Return a CONST_FIXED with value R and mode M. */ +#define CONST_FIXED_FROM_FIXED_VALUE(r, m) \ + const_fixed_from_fixed_value (r, m) +extern rtx const_fixed_from_fixed_value (FIXED_VALUE_TYPE, machine_mode); + +/* Construct a FIXED_VALUE from a bit payload and machine mode MODE. + The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */ +extern FIXED_VALUE_TYPE fixed_from_double_int (double_int, scalar_mode); + +/* Return a CONST_FIXED from a bit payload and machine mode MODE. + The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */ +static inline rtx +const_fixed_from_double_int (double_int payload, + scalar_mode mode) +{ + return + const_fixed_from_fixed_value (fixed_from_double_int (payload, mode), + mode); +} + +/* Initialize from a decimal or hexadecimal string. */ +extern void fixed_from_string (FIXED_VALUE_TYPE *, const char *, + scalar_mode); + +/* In tree.cc: wrap up a FIXED_VALUE_TYPE in a tree node. */ +extern tree build_fixed (tree, FIXED_VALUE_TYPE); + +/* Extend or truncate to a new mode. */ +extern bool fixed_convert (FIXED_VALUE_TYPE *, scalar_mode, + const FIXED_VALUE_TYPE *, bool); + +/* Convert to a fixed-point mode from an integer. */ +extern bool fixed_convert_from_int (FIXED_VALUE_TYPE *, scalar_mode, + double_int, bool, bool); + +/* Convert to a fixed-point mode from a real. */ +extern bool fixed_convert_from_real (FIXED_VALUE_TYPE *, scalar_mode, + const REAL_VALUE_TYPE *, bool); + +/* Convert to a real mode from a fixed-point. 
*/ +extern void real_convert_from_fixed (REAL_VALUE_TYPE *, scalar_mode, + const FIXED_VALUE_TYPE *); + +/* Compare two fixed-point objects for bitwise identity. */ +extern bool fixed_identical (const FIXED_VALUE_TYPE *, const FIXED_VALUE_TYPE *); + +/* Calculate a hash value. */ +extern unsigned int fixed_hash (const FIXED_VALUE_TYPE *); + +#define FIXED_VALUES_IDENTICAL(x, y) fixed_identical (&(x), &(y)) + +/* Determine whether a fixed-point value X is negative. */ +#define FIXED_VALUE_NEGATIVE(x) fixed_isneg (&(x)) + +/* Render F as a decimal floating point constant. */ +extern void fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *, size_t); + +/* Binary or unary arithmetic on tree_code. */ +extern bool fixed_arithmetic (FIXED_VALUE_TYPE *, int, const FIXED_VALUE_TYPE *, + const FIXED_VALUE_TYPE *, bool); + +/* Compare fixed-point values by tree_code. */ +extern bool fixed_compare (int, const FIXED_VALUE_TYPE *, + const FIXED_VALUE_TYPE *); + +/* Determine whether a fixed-point value X is negative. */ +extern bool fixed_isneg (const FIXED_VALUE_TYPE *); + +#endif /* GCC_FIXED_VALUE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/flag-types.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/flag-types.h new file mode 100644 index 0000000..2c84981 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/flag-types.h @@ -0,0 +1,514 @@ +/* Compilation switch flag type definitions for GCC. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FLAG_TYPES_H +#define GCC_FLAG_TYPES_H + +#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS) + +enum debug_info_type +{ + DINFO_TYPE_NONE = 0, /* No debug info. */ + DINFO_TYPE_DBX = 1, /* BSD .stabs for DBX. */ + DINFO_TYPE_DWARF2 = 2, /* Dwarf v2 debug info. */ + DINFO_TYPE_XCOFF = 3, /* IBM/Xcoff debug info. */ + DINFO_TYPE_VMS = 4, /* VMS debug info. */ + DINFO_TYPE_CTF = 5, /* CTF debug info. */ + DINFO_TYPE_BTF = 6, /* BTF debug info. */ + DINFO_TYPE_BTF_WITH_CORE = 7, /* BTF debug info with CO-RE relocations. */ + DINFO_TYPE_MAX = DINFO_TYPE_BTF_WITH_CORE /* Marker only. */ +}; + +#define NO_DEBUG (0U) +/* Write DBX debug info (using dbxout.cc). */ +#define DBX_DEBUG (1U << DINFO_TYPE_DBX) +/* Write DWARF2 debug info (using dwarf2out.cc). */ +#define DWARF2_DEBUG (1U << DINFO_TYPE_DWARF2) +/* Write IBM/XCOFF debug info (using dbxout.cc). */ +#define XCOFF_DEBUG (1U << DINFO_TYPE_XCOFF) +/* Write VMS debug info (using vmsdbgout.cc). */ +#define VMS_DEBUG (1U << DINFO_TYPE_VMS) +/* Write CTF debug info (using ctfout.cc). */ +#define CTF_DEBUG (1U << DINFO_TYPE_CTF) +/* Write BTF debug info (using btfout.cc). */ +#define BTF_DEBUG (1U << DINFO_TYPE_BTF) +/* Write BTF debug info for BPF CO-RE usecase (using btfout.cc). */ +#define BTF_WITH_CORE_DEBUG (1U << DINFO_TYPE_BTF_WITH_CORE) + +/* Note: Adding new definitions to handle -combination- of debug formats, + like VMS_AND_DWARF2_DEBUG is not recommended. This definition remains + here for historical reasons. */ +/* Write VMS debug info (using vmsdbgout.cc) and DWARF v2 debug info (using + dwarf2out.cc). */ +#define VMS_AND_DWARF2_DEBUG ((VMS_DEBUG | DWARF2_DEBUG)) + +enum debug_info_levels +{ + DINFO_LEVEL_NONE, /* Write no debugging info. */ + DINFO_LEVEL_TERSE, /* Write minimal info to support tracebacks only. 
*/ + DINFO_LEVEL_NORMAL, /* Write info for all declarations (and line table). */ + DINFO_LEVEL_VERBOSE /* Write normal info plus #define/#undef info. */ +}; + +/* CTF debug info levels. + CTF debug info levels are untied with DWARF debug info levels because CTF + may co-exist with DWARF. */ +enum ctf_debug_info_levels +{ + CTFINFO_LEVEL_NONE = 0, /* Write no CTF debug info. */ + CTFINFO_LEVEL_TERSE = 1, /* Write CTF information to support tracebacks + only. Not Implemented. */ + CTFINFO_LEVEL_NORMAL = 2 /* Write CTF type information for all entities + (functions, data objects, variables etc.) + at file-scope or global-scope only. */ +}; + +/* A major contribution to object and executable size is debug + information size. A major contribution to debug information + size is struct descriptions replicated in several object files. + The following function determines whether or not debug information + should be generated for a given struct. The indirect parameter + indicates that the struct is being handled indirectly, via + a pointer. See opts.cc for the implementation. */ + +enum debug_info_usage +{ + DINFO_USAGE_DFN, /* A struct definition. */ + DINFO_USAGE_DIR_USE, /* A direct use, such as the type of a variable. */ + DINFO_USAGE_IND_USE, /* An indirect use, such as through a pointer. */ + DINFO_USAGE_NUM_ENUMS /* The number of enumerators. */ +}; + +/* A major contribution to object and executable size is debug + information size. A major contribution to debug information size + is struct descriptions replicated in several object files. The + following flags attempt to reduce this information. The basic + idea is to not emit struct debugging information in the current + compilation unit when that information will be generated by + another compilation unit. + + Debug information for a struct defined in the current source + file should be generated in the object file. 
Likewise the + debug information for a struct defined in a header should be + generated in the object file of the corresponding source file. + Both of these case are handled when the base name of the file of + the struct definition matches the base name of the source file + of the current compilation unit. This matching emits minimal + struct debugging information. + + The base file name matching rule above will fail to emit debug + information for structs defined in system headers. So a second + category of files includes system headers in addition to files + with matching bases. + + The remaining types of files are library headers and application + headers. We cannot currently distinguish these two types. */ + +enum debug_struct_file +{ + DINFO_STRUCT_FILE_NONE, /* Debug no structs. */ + DINFO_STRUCT_FILE_BASE, /* Debug structs defined in files with the + same base name as the compilation unit. */ + DINFO_STRUCT_FILE_SYS, /* Also debug structs defined in system + header files. */ + DINFO_STRUCT_FILE_ANY /* Debug structs defined in all files. */ +}; + +/* Balance between GNAT encodings and standard DWARF to emit. */ + +enum dwarf_gnat_encodings +{ + DWARF_GNAT_ENCODINGS_ALL = 0, /* Emit all GNAT encodings, then emit as + much standard DWARF as possible so it + does not conflict with GNAT + encodings. */ + DWARF_GNAT_ENCODINGS_GDB = 1, /* Emit as much standard DWARF as possible + as long as GDB handles them. Emit GNAT + encodings for the rest. */ + DWARF_GNAT_ENCODINGS_MINIMAL = 2 /* Emit all the standard DWARF we can. + Emit GNAT encodings for the rest. */ +}; + +/* Enumerate Objective-c instance variable visibility settings. */ + +enum ivar_visibility +{ + IVAR_VISIBILITY_PRIVATE, + IVAR_VISIBILITY_PROTECTED, + IVAR_VISIBILITY_PUBLIC, + IVAR_VISIBILITY_PACKAGE +}; + +/* The stack reuse level. */ +enum stack_reuse_level +{ + SR_NONE, + SR_NAMED_VARS, + SR_ALL +}; + +/* The live patching level. 
*/ +enum live_patching_level +{ + LIVE_PATCHING_NONE = 0, + LIVE_PATCHING_INLINE_ONLY_STATIC, + LIVE_PATCHING_INLINE_CLONE +}; + +/* The algorithm used for basic block reordering. */ +enum reorder_blocks_algorithm +{ + REORDER_BLOCKS_ALGORITHM_SIMPLE, + REORDER_BLOCKS_ALGORITHM_STC +}; + +/* The algorithm used for the integrated register allocator (IRA). */ +enum ira_algorithm +{ + IRA_ALGORITHM_CB, + IRA_ALGORITHM_PRIORITY +}; + +/* The regions used for the integrated register allocator (IRA). */ +enum ira_region +{ + IRA_REGION_ONE, + IRA_REGION_ALL, + IRA_REGION_MIXED, +}; + +/* The options for excess precision. */ +enum excess_precision +{ + EXCESS_PRECISION_DEFAULT, + EXCESS_PRECISION_FAST, + EXCESS_PRECISION_STANDARD, + EXCESS_PRECISION_FLOAT16 +}; + +/* The options for which values of FLT_EVAL_METHOD are permissible. */ +enum permitted_flt_eval_methods +{ + PERMITTED_FLT_EVAL_METHODS_DEFAULT, + PERMITTED_FLT_EVAL_METHODS_TS_18661, + PERMITTED_FLT_EVAL_METHODS_C11 +}; + +/* Type of stack check. + + Stack checking is designed to detect infinite recursion and stack + overflows for Ada programs. Furthermore stack checking tries to ensure + in that scenario that enough stack space is left to run a signal handler. + + -fstack-check= does not prevent stack-clash style attacks. For that + you want -fstack-clash-protection. */ +enum stack_check_type +{ + /* Do not check the stack. */ + NO_STACK_CHECK = 0, + + /* Check the stack generically, i.e. assume no specific support + from the target configuration files. */ + GENERIC_STACK_CHECK, + + /* Check the stack and rely on the target configuration files to + check the static frame of functions, i.e. use the generic + mechanism only for dynamic stack allocations. */ + STATIC_BUILTIN_STACK_CHECK, + + /* Check the stack and entirely rely on the target configuration + files, i.e. do not use the generic mechanism at all. */ + FULL_BUILTIN_STACK_CHECK +}; + +/* Type of callgraph information. 
*/ +enum callgraph_info_type +{ + /* No information. */ + NO_CALLGRAPH_INFO = 0, + + /* Naked callgraph. */ + CALLGRAPH_INFO_NAKED = 1, + + /* Callgraph decorated with stack usage information. */ + CALLGRAPH_INFO_STACK_USAGE = 2, + + /* Callgraph decoration with dynamic allocation information. */ + CALLGRAPH_INFO_DYNAMIC_ALLOC = 4 +}; + +/* Floating-point contraction mode. */ +enum fp_contract_mode { + FP_CONTRACT_OFF = 0, + FP_CONTRACT_ON = 1, + FP_CONTRACT_FAST = 2 +}; + +/* Scalar storage order kind. */ +enum scalar_storage_order_kind { + SSO_NATIVE = 0, + SSO_BIG_ENDIAN, + SSO_LITTLE_ENDIAN +}; + +/* Vectorizer cost-model. Except for DEFAULT, the values are ordered from + the most conservative to the least conservative. */ +enum vect_cost_model { + VECT_COST_MODEL_VERY_CHEAP = -3, + VECT_COST_MODEL_CHEAP = -2, + VECT_COST_MODEL_DYNAMIC = -1, + VECT_COST_MODEL_UNLIMITED = 0, + VECT_COST_MODEL_DEFAULT = 1 +}; + +/* Automatic variable initialization type. */ +enum auto_init_type { + AUTO_INIT_UNINITIALIZED = 0, + AUTO_INIT_PATTERN = 1, + AUTO_INIT_ZERO = 2 +}; + +/* Different instrumentation modes. */ +enum sanitize_code { + /* AddressSanitizer. */ + SANITIZE_ADDRESS = 1UL << 0, + SANITIZE_USER_ADDRESS = 1UL << 1, + SANITIZE_KERNEL_ADDRESS = 1UL << 2, + /* ThreadSanitizer. */ + SANITIZE_THREAD = 1UL << 3, + /* LeakSanitizer. */ + SANITIZE_LEAK = 1UL << 4, + /* UndefinedBehaviorSanitizer. 
*/ + SANITIZE_SHIFT_BASE = 1UL << 5, + SANITIZE_SHIFT_EXPONENT = 1UL << 6, + SANITIZE_DIVIDE = 1UL << 7, + SANITIZE_UNREACHABLE = 1UL << 8, + SANITIZE_VLA = 1UL << 9, + SANITIZE_NULL = 1UL << 10, + SANITIZE_RETURN = 1UL << 11, + SANITIZE_SI_OVERFLOW = 1UL << 12, + SANITIZE_BOOL = 1UL << 13, + SANITIZE_ENUM = 1UL << 14, + SANITIZE_FLOAT_DIVIDE = 1UL << 15, + SANITIZE_FLOAT_CAST = 1UL << 16, + SANITIZE_BOUNDS = 1UL << 17, + SANITIZE_ALIGNMENT = 1UL << 18, + SANITIZE_NONNULL_ATTRIBUTE = 1UL << 19, + SANITIZE_RETURNS_NONNULL_ATTRIBUTE = 1UL << 20, + SANITIZE_OBJECT_SIZE = 1UL << 21, + SANITIZE_VPTR = 1UL << 22, + SANITIZE_BOUNDS_STRICT = 1UL << 23, + SANITIZE_POINTER_OVERFLOW = 1UL << 24, + SANITIZE_BUILTIN = 1UL << 25, + SANITIZE_POINTER_COMPARE = 1UL << 26, + SANITIZE_POINTER_SUBTRACT = 1UL << 27, + SANITIZE_HWADDRESS = 1UL << 28, + SANITIZE_USER_HWADDRESS = 1UL << 29, + SANITIZE_KERNEL_HWADDRESS = 1UL << 30, + /* Shadow Call Stack. */ + SANITIZE_SHADOW_CALL_STACK = 1UL << 31, + SANITIZE_SHIFT = SANITIZE_SHIFT_BASE | SANITIZE_SHIFT_EXPONENT, + SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE + | SANITIZE_VLA | SANITIZE_NULL | SANITIZE_RETURN + | SANITIZE_SI_OVERFLOW | SANITIZE_BOOL | SANITIZE_ENUM + | SANITIZE_BOUNDS | SANITIZE_ALIGNMENT + | SANITIZE_NONNULL_ATTRIBUTE + | SANITIZE_RETURNS_NONNULL_ATTRIBUTE + | SANITIZE_OBJECT_SIZE | SANITIZE_VPTR + | SANITIZE_POINTER_OVERFLOW | SANITIZE_BUILTIN, + SANITIZE_UNDEFINED_NONDEFAULT = SANITIZE_FLOAT_DIVIDE | SANITIZE_FLOAT_CAST + | SANITIZE_BOUNDS_STRICT +}; + +/* Different settings for zeroing subset of registers. 
*/ +namespace zero_regs_flags { + const unsigned int UNSET = 0; + const unsigned int SKIP = 1UL << 0; + const unsigned int ONLY_USED = 1UL << 1; + const unsigned int ONLY_GPR = 1UL << 2; + const unsigned int ONLY_ARG = 1UL << 3; + const unsigned int ENABLED = 1UL << 4; + const unsigned int USED_GPR_ARG = ENABLED | ONLY_USED | ONLY_GPR | ONLY_ARG; + const unsigned int USED_GPR = ENABLED | ONLY_USED | ONLY_GPR; + const unsigned int USED_ARG = ENABLED | ONLY_USED | ONLY_ARG; + const unsigned int USED = ENABLED | ONLY_USED; + const unsigned int ALL_GPR_ARG = ENABLED | ONLY_GPR | ONLY_ARG; + const unsigned int ALL_GPR = ENABLED | ONLY_GPR; + const unsigned int ALL_ARG = ENABLED | ONLY_ARG; + const unsigned int ALL = ENABLED; +} + +/* Settings of flag_incremental_link. */ +enum incremental_link { + INCREMENTAL_LINK_NONE, + /* Do incremental linking and produce binary. */ + INCREMENTAL_LINK_NOLTO, + /* Do incremental linking and produce IL. */ + INCREMENTAL_LINK_LTO +}; + +/* Different trace modes. */ +enum sanitize_coverage_code { + /* Trace PC. */ + SANITIZE_COV_TRACE_PC = 1 << 0, + /* Trace Comparison. */ + SANITIZE_COV_TRACE_CMP = 1 << 1 +}; + +/* flag_vtable_verify initialization levels. */ +enum vtv_priority { + VTV_NO_PRIORITY = 0, /* i.E. Do NOT do vtable verification. */ + VTV_STANDARD_PRIORITY = 1, + VTV_PREINIT_PRIORITY = 2 +}; + +/* flag_lto_partition initialization values. */ +enum lto_partition_model { + LTO_PARTITION_NONE = 0, + LTO_PARTITION_ONE = 1, + LTO_PARTITION_BALANCED = 2, + LTO_PARTITION_1TO1 = 3, + LTO_PARTITION_MAX = 4 +}; + +/* flag_lto_linker_output initialization values. */ +enum lto_linker_output { + LTO_LINKER_OUTPUT_UNKNOWN, + LTO_LINKER_OUTPUT_REL, + LTO_LINKER_OUTPUT_NOLTOREL, + LTO_LINKER_OUTPUT_DYN, + LTO_LINKER_OUTPUT_PIE, + LTO_LINKER_OUTPUT_EXEC +}; + +/* gfortran -finit-real= values. 
*/ + +enum gfc_init_local_real +{ + GFC_INIT_REAL_OFF = 0, + GFC_INIT_REAL_ZERO, + GFC_INIT_REAL_NAN, + GFC_INIT_REAL_SNAN, + GFC_INIT_REAL_INF, + GFC_INIT_REAL_NEG_INF +}; + +/* gfortran -fcoarray= values. */ + +enum gfc_fcoarray +{ + GFC_FCOARRAY_NONE = 0, + GFC_FCOARRAY_SINGLE, + GFC_FCOARRAY_LIB +}; + + +/* gfortran -fconvert= values; used for unformatted I/O. + Keep in sync with GFC_CONVERT_* in gcc/fortran/libgfortran.h. */ +enum gfc_convert +{ + GFC_FLAG_CONVERT_NATIVE = 0, + GFC_FLAG_CONVERT_SWAP, + GFC_FLAG_CONVERT_BIG, + GFC_FLAG_CONVERT_LITTLE, + GFC_FLAG_CONVERT_R16_IEEE = 4, + GFC_FLAG_CONVERT_R16_IEEE_SWAP, + GFC_FLAG_CONVERT_R16_IEEE_BIG, + GFC_FLAG_CONVERT_R16_IEEE_LITTLE, + GFC_FLAG_CONVERT_R16_IBM = 8, + GFC_FLAG_CONVERT_R16_IBM_SWAP, + GFC_FLAG_CONVERT_R16_IBM_BIG, + GFC_FLAG_CONVERT_R16_IBM_LITTLE, +}; + + +/* Control-Flow Protection values. */ +enum cf_protection_level +{ + CF_NONE = 0, + CF_BRANCH = 1 << 0, + CF_RETURN = 1 << 1, + CF_FULL = CF_BRANCH | CF_RETURN, + CF_SET = 1 << 2, + CF_CHECK = 1 << 3 +}; + +/* Parloops schedule type. */ +enum parloops_schedule_type +{ + PARLOOPS_SCHEDULE_STATIC = 0, + PARLOOPS_SCHEDULE_DYNAMIC, + PARLOOPS_SCHEDULE_GUIDED, + PARLOOPS_SCHEDULE_AUTO, + PARLOOPS_SCHEDULE_RUNTIME +}; + +/* Ranger debug mode. */ +enum ranger_debug +{ + RANGER_DEBUG_NONE = 0, + RANGER_DEBUG_TRACE = 1, + RANGER_DEBUG_CACHE = 2, + RANGER_DEBUG_GORI = 4, + RANGER_DEBUG_TRACE_GORI = (RANGER_DEBUG_TRACE | RANGER_DEBUG_GORI), + RANGER_DEBUG_TRACE_CACHE = (RANGER_DEBUG_TRACE | RANGER_DEBUG_CACHE), + RANGER_DEBUG_ALL = (RANGER_DEBUG_GORI | RANGER_DEBUG_CACHE + | RANGER_DEBUG_TRACE) +}; + +/* Jump threader verbose dumps. */ +enum threader_debug +{ + THREADER_DEBUG_NONE = 0, + THREADER_DEBUG_ALL = 1 +}; + +/* EVRP mode. */ +enum evrp_mode +{ + EVRP_MODE_RVRP_ONLY, + EVRP_MODE_EVRP_ONLY, + EVRP_MODE_EVRP_FIRST, + EVRP_MODE_RVRP_FIRST +}; + +/* VRP modes. 
*/ +enum vrp_mode +{ + VRP_MODE_VRP, + VRP_MODE_RANGER +}; + +/* Modes of OpenACC 'kernels' constructs handling. */ +enum openacc_kernels +{ + OPENACC_KERNELS_DECOMPOSE, + OPENACC_KERNELS_PARLOOPS +}; + +/* Modes of OpenACC privatization diagnostics. */ +enum openacc_privatization +{ + OPENACC_PRIVATIZATION_QUIET, + OPENACC_PRIVATIZATION_NOISY +}; + +#endif + +#endif /* ! GCC_FLAG_TYPES_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/flags.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/flags.h new file mode 100644 index 0000000..212e357 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/flags.h @@ -0,0 +1,117 @@ +/* Compilation switch flag definitions for GCC. + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FLAGS_H +#define GCC_FLAGS_H + +#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS) + +/* Names of fundamental debug info formats indexed by enum + debug_info_type. */ + +extern const char *const debug_type_names[]; + +/* Get enum debug_info_type of the specified debug format, for error messages. + Can be used only for individual debug format types. */ + +extern enum debug_info_type debug_set_to_format (uint32_t debug_info_set); + +/* Get the number of debug formats enabled for output. 
*/ + +unsigned int debug_set_count (uint32_t w_symbols); + +/* Get the names of the debug formats enabled for output. */ + +const char * debug_set_names (uint32_t w_symbols); + +#ifndef GENERATOR_FILE +/* Return true iff BTF debug info is enabled. */ + +extern bool btf_debuginfo_p (); + +/* Return true iff BTF with CO-RE debug info is enabled. */ + +extern bool btf_with_core_debuginfo_p (); + +/* Return true iff CTF debug info is enabled. */ + +extern bool ctf_debuginfo_p (); + +/* Return true iff DWARF2 debug info is enabled. */ + +extern bool dwarf_debuginfo_p (struct gcc_options *opts = &global_options); + +/* Return true iff the debug info format is to be generated based on DWARF + DIEs (like CTF and BTF debug info formats). */ + +extern bool dwarf_based_debuginfo_p (); +#endif + +extern void strip_off_ending (char *, int); +extern int base_of_path (const char *path, const char **base_out); + +/* Return true iff flags are set as if -ffast-math. */ +extern bool fast_math_flags_set_p (const struct gcc_options *); +extern bool fast_math_flags_struct_set_p (struct cl_optimization *); + + +/* Now the symbols that are set with `-f' switches. */ + +/* True if printing into -fdump-final-insns= dump. */ + +extern bool final_insns_dump_p; + + +/* Other basic status info about current function. 
*/ + +class target_flag_state +{ +public: + /* Each falign-foo can generate up to two levels of alignment: + -falign-foo=N:M[:N2:M2] */ + align_flags x_align_loops; + align_flags x_align_jumps; + align_flags x_align_labels; + align_flags x_align_functions; +}; + +extern class target_flag_state default_target_flag_state; +#if SWITCHABLE_TARGET +extern class target_flag_state *this_target_flag_state; +#else +#define this_target_flag_state (&default_target_flag_state) +#endif + +#define align_loops (this_target_flag_state->x_align_loops) +#define align_jumps (this_target_flag_state->x_align_jumps) +#define align_labels (this_target_flag_state->x_align_labels) +#define align_functions (this_target_flag_state->x_align_functions) + +/* Returns TRUE if generated code should match ABI version N or + greater is in use. */ + +#define abi_version_at_least(N) \ + (flag_abi_version == 0 || flag_abi_version >= (N)) + +/* Whether to emit an overflow warning whose code is C. */ +#define issue_strict_overflow_warning(c) (warn_strict_overflow >= (int) (c)) + +#endif + +#endif /* ! GCC_FLAGS_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fold-const-call.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fold-const-call.h new file mode 100644 index 0000000..becd5a1 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fold-const-call.h @@ -0,0 +1,28 @@ +/* Fold calls to built-in and internal functions with constant arguments. + Copyright (C) 2015-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FOLD_CONST_CALL_H +#define GCC_FOLD_CONST_CALL_H + +tree fold_const_call (combined_fn, tree, tree); +tree fold_const_call (combined_fn, tree, tree, tree); +tree fold_const_call (combined_fn, tree, tree, tree, tree); +tree build_cmp_result (tree type, int res); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fold-const.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fold-const.h new file mode 100644 index 0000000..a4ff554 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/fold-const.h @@ -0,0 +1,270 @@ +/* Fold a constant sub-tree into a single node for C-compiler + Copyright (C) 1987-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FOLD_CONST_H +#define GCC_FOLD_CONST_H + +/* Nonzero if we are folding constants inside an initializer or a C++ + manifestly-constant-evaluated context; zero otherwise. + Should be used when folding in initializer enables additional + optimizations. */ +extern int folding_initializer; +/* Nonzero if we are folding C++ manifestly-constant-evaluated context; zero + otherwise. + Should be used when certain constructs shouldn't be optimized + during folding in that context. 
*/ +extern bool folding_cxx_constexpr; + +/* Convert between trees and native memory representation. */ +extern int native_encode_expr (const_tree, unsigned char *, int, int off = -1); +extern int native_encode_initializer (tree, unsigned char *, int, + int off = -1, unsigned char * = nullptr); +extern tree native_interpret_expr (tree, const unsigned char *, int); +extern tree native_interpret_real (tree, const unsigned char *, int); +extern bool can_native_interpret_type_p (tree); +extern tree native_interpret_aggregate (tree, const unsigned char *, int, int); +extern tree find_bitfield_repr_type (int, int); +extern void shift_bytes_in_array_left (unsigned char *, unsigned int, + unsigned int); +extern void shift_bytes_in_array_right (unsigned char *, unsigned int, + unsigned int); + +/* Fold constants as much as possible in an expression. + Returns the simplified expression. + Acts only on the top level of the expression; + if the argument itself cannot be simplified, its + subexpressions are not changed. 
*/ + +extern tree fold (tree); +extern tree fold_init (tree); +#define fold_unary(CODE,T1,T2)\ + fold_unary_loc (UNKNOWN_LOCATION, CODE, T1, T2) +extern tree fold_unary_loc (location_t, enum tree_code, tree, tree); +#define fold_unary_ignore_overflow(CODE,T1,T2)\ + fold_unary_ignore_overflow_loc (UNKNOWN_LOCATION, CODE, T1, T2) +extern tree fold_unary_ignore_overflow_loc (location_t, enum tree_code, tree, tree); +#define fold_binary(CODE,T1,T2,T3)\ + fold_binary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3) +extern tree fold_binary_loc (location_t, enum tree_code, tree, tree, tree); +#define fold_ternary(CODE,T1,T2,T3,T4)\ + fold_ternary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3, T4) +extern tree fold_ternary_loc (location_t, enum tree_code, tree, tree, tree, tree); +#define fold_build1(c,t1,t2)\ + fold_build1_loc (UNKNOWN_LOCATION, c, t1, t2 MEM_STAT_INFO) +extern tree fold_build1_loc (location_t, enum tree_code, tree, + tree CXX_MEM_STAT_INFO); +#define fold_build2(c,t1,t2,t3)\ + fold_build2_loc (UNKNOWN_LOCATION, c, t1, t2, t3 MEM_STAT_INFO) +extern tree fold_build2_loc (location_t, enum tree_code, tree, tree, + tree CXX_MEM_STAT_INFO); +#define fold_build3(c,t1,t2,t3,t4)\ + fold_build3_loc (UNKNOWN_LOCATION, c, t1, t2, t3, t4 MEM_STAT_INFO) +extern tree fold_build3_loc (location_t, enum tree_code, tree, tree, tree, + tree CXX_MEM_STAT_INFO); +extern tree fold_build1_initializer_loc (location_t, enum tree_code, tree, tree); +extern tree fold_build2_initializer_loc (location_t, enum tree_code, tree, tree, tree); +#define fold_build_call_array(T1,T2,N,T4)\ + fold_build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T4) +extern tree fold_build_call_array_loc (location_t, tree, tree, int, tree *); +#define fold_build_call_array_initializer(T1,T2,N,T4)\ + fold_build_call_array_initializer_loc (UNKNOWN_LOCATION, T1, T2, N, T4) +extern tree fold_build_call_array_initializer_loc (location_t, tree, tree, int, tree *); +extern tree fold_binary_initializer_loc (location_t, 
tree_code, tree, tree, tree); +extern tree get_array_ctor_element_at_index (tree, offset_int, + unsigned * = NULL); +extern bool fold_convertible_p (const_tree, const_tree); +#define fold_convert(T1,T2)\ + fold_convert_loc (UNKNOWN_LOCATION, T1, T2) +extern tree fold_convert_loc (location_t, tree, tree); +extern tree fold_single_bit_test (location_t, enum tree_code, tree, tree, tree); +extern tree fold_ignored_result (tree); +extern tree fold_abs_const (tree, tree); +extern tree fold_indirect_ref_1 (location_t, tree, tree); +extern void fold_defer_overflow_warnings (void); +extern void fold_undefer_overflow_warnings (bool, const gimple *, int); +extern void fold_undefer_and_ignore_overflow_warnings (void); +extern bool fold_deferring_overflow_warnings_p (void); +extern void fold_overflow_warning (const char*, enum warn_strict_overflow_code); +extern enum tree_code fold_div_compare (enum tree_code, tree, tree, + tree *, tree *, bool *); +extern bool operand_equal_p (const_tree, const_tree, unsigned int flags = 0); +extern int multiple_of_p (tree, const_tree, const_tree, bool = true); +#define omit_one_operand(T1,T2,T3)\ + omit_one_operand_loc (UNKNOWN_LOCATION, T1, T2, T3) +extern tree omit_one_operand_loc (location_t, tree, tree, tree); +#define omit_two_operands(T1,T2,T3,T4)\ + omit_two_operands_loc (UNKNOWN_LOCATION, T1, T2, T3, T4) +extern tree omit_two_operands_loc (location_t, tree, tree, tree, tree); +#define invert_truthvalue(T)\ + invert_truthvalue_loc (UNKNOWN_LOCATION, T) +extern tree invert_truthvalue_loc (location_t, tree); +extern tree fold_unary_to_constant (enum tree_code, tree, tree); +extern tree fold_binary_to_constant (enum tree_code, tree, tree, tree); +extern tree fold_bit_and_mask (tree, tree, enum tree_code, + tree, enum tree_code, tree, tree, + tree, enum tree_code, tree, tree, tree *); +extern tree fold_read_from_constant_string (tree); +extern tree fold_read_from_vector (tree, poly_uint64); +#if GCC_VEC_PERN_INDICES_H +extern tree 
fold_vec_perm (tree, tree, tree, const vec_perm_indices &); +#endif +extern bool wide_int_binop (wide_int &res, enum tree_code, + const wide_int &arg1, const wide_int &arg2, + signop, wi::overflow_type *); +extern tree int_const_binop (enum tree_code, const_tree, const_tree, int = 1); +#define build_fold_addr_expr(T)\ + build_fold_addr_expr_loc (UNKNOWN_LOCATION, (T)) +extern tree build_fold_addr_expr_loc (location_t, tree); +#define build_fold_addr_expr_with_type(T,TYPE)\ + build_fold_addr_expr_with_type_loc (UNKNOWN_LOCATION, (T), TYPE) +extern tree build_fold_addr_expr_with_type_loc (location_t, tree, tree); +extern tree fold_build_cleanup_point_expr (tree type, tree expr); +#define build_fold_indirect_ref(T)\ + build_fold_indirect_ref_loc (UNKNOWN_LOCATION, T) +extern tree build_fold_indirect_ref_loc (location_t, tree); +#define fold_indirect_ref(T)\ + fold_indirect_ref_loc (UNKNOWN_LOCATION, T) +extern tree fold_indirect_ref_loc (location_t, tree); +extern tree build_simple_mem_ref_loc (location_t, tree); +#define build_simple_mem_ref(T)\ + build_simple_mem_ref_loc (UNKNOWN_LOCATION, T) +extern poly_offset_int mem_ref_offset (const_tree); +extern tree build_invariant_address (tree, tree, poly_int64); +extern tree constant_boolean_node (bool, tree); +extern tree div_if_zero_remainder (const_tree, const_tree); + +extern bool tree_swap_operands_p (const_tree, const_tree); +extern enum tree_code swap_tree_comparison (enum tree_code); + +extern bool ptr_difference_const (tree, tree, poly_int64_pod *); +extern enum tree_code invert_tree_comparison (enum tree_code, bool); +extern bool inverse_conditions_p (const_tree, const_tree); + +extern bool tree_unary_nonzero_warnv_p (enum tree_code, tree, tree, bool *); +extern bool tree_binary_nonzero_warnv_p (enum tree_code, tree, tree, tree op1, + bool *); +extern bool tree_single_nonzero_warnv_p (tree, bool *); +extern bool tree_unary_nonnegative_warnv_p (enum tree_code, tree, tree, + bool *, int); +extern bool 
tree_binary_nonnegative_warnv_p (enum tree_code, tree, tree, tree, + bool *, int); +extern bool tree_single_nonnegative_warnv_p (tree, bool *, int); +extern bool tree_call_nonnegative_warnv_p (tree, combined_fn, tree, tree, + bool *, int); + +extern bool integer_valued_real_unary_p (tree_code, tree, int); +extern bool integer_valued_real_binary_p (tree_code, tree, tree, int); +extern bool integer_valued_real_call_p (combined_fn, tree, tree, int); +extern bool integer_valued_real_single_p (tree, int); +extern bool integer_valued_real_p (tree, int = 0); + +extern bool fold_real_zero_addition_p (const_tree, const_tree, const_tree, + int); +extern tree combine_comparisons (location_t, enum tree_code, enum tree_code, + enum tree_code, tree, tree, tree); +extern void debug_fold_checksum (const_tree); +extern bool may_negate_without_overflow_p (const_tree); +#define round_up(T,N) round_up_loc (UNKNOWN_LOCATION, T, N) +extern tree round_up_loc (location_t, tree, unsigned int); +#define round_down(T,N) round_down_loc (UNKNOWN_LOCATION, T, N) +extern tree round_down_loc (location_t, tree, int); +extern tree size_int_kind (poly_int64, enum size_type_kind); +#define size_binop(CODE,T1,T2)\ + size_binop_loc (UNKNOWN_LOCATION, CODE, T1, T2) +extern tree size_binop_loc (location_t, enum tree_code, tree, tree); +#define size_diffop(T1,T2)\ + size_diffop_loc (UNKNOWN_LOCATION, T1, T2) +extern tree size_diffop_loc (location_t, tree, tree); + +/* Return an expr equal to X but certainly not valid as an lvalue. 
*/ +#define non_lvalue(T) non_lvalue_loc (UNKNOWN_LOCATION, T) +extern tree non_lvalue_loc (location_t, tree); + +extern bool tree_expr_nonzero_p (tree); +extern bool tree_expr_nonnegative_p (tree); +extern bool tree_expr_nonnegative_warnv_p (tree, bool *, int = 0); +extern bool tree_expr_finite_p (const_tree); +extern bool tree_expr_infinite_p (const_tree); +extern bool tree_expr_maybe_infinite_p (const_tree); +extern bool tree_expr_signaling_nan_p (const_tree); +extern bool tree_expr_maybe_signaling_nan_p (const_tree); +extern bool tree_expr_nan_p (const_tree); +extern bool tree_expr_maybe_nan_p (const_tree); +extern bool tree_expr_maybe_real_minus_zero_p (const_tree); +extern tree make_range (tree, int *, tree *, tree *, bool *); +extern tree make_range_step (location_t, enum tree_code, tree, tree, tree, + tree *, tree *, int *, bool *); +extern tree range_check_type (tree); +extern tree build_range_check (location_t, tree, tree, int, tree, tree); +extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int, + tree, tree); +extern tree sign_bit_p (tree, const_tree); +extern tree exact_inverse (tree, tree); +extern bool expr_not_equal_to (tree t, const wide_int &); +extern tree const_unop (enum tree_code, tree, tree); +extern tree const_binop (enum tree_code, tree, tree, tree); +extern bool negate_mathfn_p (combined_fn); +extern const char *getbyterep (tree, unsigned HOST_WIDE_INT *); +extern const char *c_getstr (tree); +extern wide_int tree_nonzero_bits (const_tree); +extern int address_compare (tree_code, tree, tree, tree, tree &, tree &, + poly_int64 &, poly_int64 &, bool); +extern tree ctor_single_nonzero_element (const_tree); + +/* Return OFF converted to a pointer offset type suitable as offset for + POINTER_PLUS_EXPR. Use location LOC for this conversion. 
*/ +extern tree convert_to_ptrofftype_loc (location_t loc, tree off); + +#define convert_to_ptrofftype(t) convert_to_ptrofftype_loc (UNKNOWN_LOCATION, t) + +/* Build and fold a POINTER_PLUS_EXPR at LOC offsetting PTR by OFF. */ +extern tree fold_build_pointer_plus_loc (location_t loc, tree ptr, tree off); + +#define fold_build_pointer_plus(p,o) \ + fold_build_pointer_plus_loc (UNKNOWN_LOCATION, p, o) + +/* Build and fold a POINTER_PLUS_EXPR at LOC offsetting PTR by OFF. */ +extern tree fold_build_pointer_plus_hwi_loc (location_t loc, tree ptr, HOST_WIDE_INT off); + +#define fold_build_pointer_plus_hwi(p,o) \ + fold_build_pointer_plus_hwi_loc (UNKNOWN_LOCATION, p, o) + + +/* Class used to compare gimple operands. */ + +class operand_compare +{ +public: + /* Return true if two operands are equal. The flags fields can be used + to specify OEP flags described in tree-core.h. */ + virtual bool operand_equal_p (const_tree, const_tree, unsigned int flags); + + /* Generate a hash value for an expression. This can be used iteratively + by passing a previous result as the HSTATE argument. */ + virtual void hash_operand (const_tree, inchash::hash &, unsigned flags); + +protected: + /* Verify that when arguments (ARG0 and ARG1) are equal, then they have + an equal hash value. When the function knowns comparison return, + true is returned. Then RET is set to corresponding comparsion result. */ + bool verify_hash_value (const_tree arg0, const_tree arg1, unsigned int flags, + bool *ret); +}; + +#endif // GCC_FOLD_CONST_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/function-abi.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/function-abi.h new file mode 100644 index 0000000..388f5d8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/function-abi.h @@ -0,0 +1,320 @@ +/* Information about function binary interfaces. + Copyright (C) 2019-2022 Free Software Foundation, Inc. 
+ +This file is part of GCC + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FUNCTION_ABI_H +#define GCC_FUNCTION_ABI_H + +/* Most targets use the same ABI for all functions in a translation + unit, but some targets support interoperability between several ABIs. + Each such ABI has a unique 0-based identifier, with 0 always being + the default choice of ABI. + + NUM_ABI_IDS is the maximum number of such ABIs that GCC can handle at once. + A bitfield with this number of bits can represent any combinaion of the + supported ABIs. */ +const size_t NUM_ABI_IDS = 8; + +/* Information about one of the target's predefined ABIs. */ +class predefined_function_abi +{ +public: + /* A target-specific identifier for this ABI. The value must be in + the range [0, NUM_ABI_IDS - 1]. */ + unsigned int id () const { return m_id; } + + /* True if this ABI has been initialized. */ + bool initialized_p () const { return m_initialized; } + + /* Return true if a function call is allowed to alter every bit of + register REGNO, so that the register contains an arbitrary value + on return. If so, the register cannot hold any part of a value + that is live across a call. */ + bool + clobbers_full_reg_p (unsigned int regno) const + { + return TEST_HARD_REG_BIT (m_full_reg_clobbers, regno); + } + + /* Return true if a function call is allowed to alter some or all bits + of register REGNO. + + This is true whenever clobbers_full_reg_p (REGNO) is true. 
It is + also true if, for example, the ABI says that a call must preserve the + low 32 or 64 bits of REGNO, but can clobber the upper bits of REGNO. + In the latter case, it is possible for REGNO to hold values that + are live across a call, provided that the value occupies only the + call-preserved part of the register. */ + bool + clobbers_at_least_part_of_reg_p (unsigned int regno) const + { + return TEST_HARD_REG_BIT (m_full_and_partial_reg_clobbers, regno); + } + + /* Return true if a function call is allowed to clobber at least part + of (reg:MODE REGNO). If so, it is not possible for the register + as a whole to be live across a call. */ + bool + clobbers_reg_p (machine_mode mode, unsigned int regno) const + { + return overlaps_hard_reg_set_p (m_mode_clobbers[mode], mode, regno); + } + + /* Return the set of registers that a function call is allowed to + alter completely, so that the registers contain arbitrary values + on return. This doesn't include registers that a call can only + partly clobber (as per TARGET_HARD_REGNO_CALL_PART_CLOBBERED). + + These registers cannot hold any part of a value that is live across + a call. */ + HARD_REG_SET full_reg_clobbers () const { return m_full_reg_clobbers; } + + /* Return the set of registers that a function call is allowed to alter + to some degree. For example, if an ABI says that a call must preserve + the low 32 or 64 bits of a register R, but can clobber the upper bits + of R, R would be in this set but not in full_reg_clobbers (). + + This set is a superset of full_reg_clobbers (). It is possible for a + register in full_and_partial_reg_clobbers () & ~full_reg_clobbers () + to contain values that are live across a call, provided that the live + value only occupies the call-preserved part of the register. 
*/ + HARD_REG_SET + full_and_partial_reg_clobbers () const + { + return m_full_and_partial_reg_clobbers; + } + + /* Return the set of registers that cannot be used to hold a value of + mode MODE across a function call. That is: + + (reg:REGNO MODE) + + might be clobbered by a call whenever: + + overlaps_hard_reg_set (mode_clobbers (MODE), MODE, REGNO) + + In allocation terms, the registers in the returned set conflict + with any value of mode MODE that is live across a call. */ + HARD_REG_SET + mode_clobbers (machine_mode mode) const + { + return m_mode_clobbers[mode]; + } + + void initialize (unsigned int, const_hard_reg_set); + void add_full_reg_clobber (unsigned int); + +private: + unsigned int m_id : NUM_ABI_IDS; + unsigned int m_initialized : 1; + HARD_REG_SET m_full_reg_clobbers; + HARD_REG_SET m_full_and_partial_reg_clobbers; + HARD_REG_SET m_mode_clobbers[NUM_MACHINE_MODES]; +}; + +/* Describes either a predefined ABI or the ABI of a particular function. + In the latter case, the ABI might make use of extra function-specific + information, such as for -fipa-ra. */ +class function_abi +{ +public: + /* Initialize the structure for a general function with the given ABI. */ + function_abi (const predefined_function_abi &base_abi) + : m_base_abi (&base_abi), + m_mask (base_abi.full_and_partial_reg_clobbers ()) {} + + /* Initialize the structure for a function that has the given ABI and + that is known not to clobber registers outside MASK. */ + function_abi (const predefined_function_abi &base_abi, + const_hard_reg_set mask) + : m_base_abi (&base_abi), m_mask (mask) {} + + /* The predefined ABI from which this ABI is derived. */ + const predefined_function_abi &base_abi () const { return *m_base_abi; } + + /* The target-specific identifier of the predefined ABI. */ + unsigned int id () const { return m_base_abi->id (); } + + /* See the corresponding predefined_function_abi functions for + details about the following functions. 
*/ + + HARD_REG_SET + full_reg_clobbers () const + { + return m_mask & m_base_abi->full_reg_clobbers (); + } + + HARD_REG_SET + full_and_partial_reg_clobbers () const + { + return m_mask & m_base_abi->full_and_partial_reg_clobbers (); + } + + HARD_REG_SET + mode_clobbers (machine_mode mode) const + { + return m_mask & m_base_abi->mode_clobbers (mode); + } + + bool + clobbers_full_reg_p (unsigned int regno) const + { + return (TEST_HARD_REG_BIT (m_mask, regno) + & m_base_abi->clobbers_full_reg_p (regno)); + } + + bool + clobbers_at_least_part_of_reg_p (unsigned int regno) const + { + return (TEST_HARD_REG_BIT (m_mask, regno) + & m_base_abi->clobbers_at_least_part_of_reg_p (regno)); + } + + bool + clobbers_reg_p (machine_mode mode, unsigned int regno) const + { + return overlaps_hard_reg_set_p (mode_clobbers (mode), mode, regno); + } + + bool + operator== (const function_abi &other) const + { + return m_base_abi == other.m_base_abi && m_mask == other.m_mask; + } + + bool + operator!= (const function_abi &other) const + { + return !operator== (other); + } + +protected: + const predefined_function_abi *m_base_abi; + HARD_REG_SET m_mask; +}; + +/* This class collects information about the ABIs of functions that are + called in a particular region of code. It is mostly intended to be + used as a local variable during an IR walk. */ +class function_abi_aggregator +{ +public: + function_abi_aggregator () : m_abi_clobbers () {} + + /* Record that the code region calls a function with the given ABI. */ + void + note_callee_abi (const function_abi &abi) + { + m_abi_clobbers[abi.id ()] |= abi.full_and_partial_reg_clobbers (); + } + + HARD_REG_SET caller_save_regs (const function_abi &) const; + +private: + HARD_REG_SET m_abi_clobbers[NUM_ABI_IDS]; +}; + +struct target_function_abi_info +{ + /* An array of all the target ABIs that are available in this + translation unit. 
Not all entries are used for all targets, + but the structures are relatively small, and using a fixed-size + array avoids extra indirection. + + There are various ways of getting an ABI descriptor: + + * fndecl_abi (FNDECL) is the ABI of function FNDECL. + + * fntype_abi (FNTYPE) is the ABI of a function with type FNTYPE. + + * crtl->abi is the ABI of the function that we are currently + compiling to rtl. + + * insn_callee_abi (INSN) is the ABI used by the target of call insn INSN. + + * eh_edge_abi is the "ABI" used when taking an EH edge from an + exception-throwing statement to an exception handler. Catching + exceptions from calls can be treated as an abnormal return from + those calls, and this ABI therefore describes the ABI of functions + on such an abnormal return. Statements that throw non-call + exceptions can be treated as being implicitly wrapped in a call + that has such an abnormal return. + + At present, no target needs to support more than one EH ABI. + + * function_abis[N] is the ABI with identifier N. This can be useful + when referring back to ABIs that have been collected by number in + a bitmask, such as after walking function calls in a particular + region of code. + + * default_function_abi refers specifically to the target's default + choice of ABI, regardless of which (if any) functions actually + use it. This ABI and data derived from it do *not* provide + globally conservatively-correct information, so it is only + useful in very specific circumstances. */ + predefined_function_abi x_function_abis[NUM_ABI_IDS]; +}; + +extern target_function_abi_info default_target_function_abi_info; +#if SWITCHABLE_TARGET +extern target_function_abi_info *this_target_function_abi_info; +#else +#define this_target_function_abi_info (&default_target_function_abi_info) +#endif + +/* See the comment above x_function_abis for when these macros should be used. 
+ At present, eh_edge_abi is always the default ABI, but that could change + in future if a target needs it to. */ +#define function_abis \ + (this_target_function_abi_info->x_function_abis) +#define default_function_abi \ + (this_target_function_abi_info->x_function_abis[0]) +#define eh_edge_abi default_function_abi + +extern HARD_REG_SET call_clobbers_in_region (unsigned int, const_hard_reg_set, + machine_mode mode); + +/* Return true if (reg:MODE REGNO) might be clobbered by one of the + calls in a region described by ABIS and MASK, where: + + * Bit ID of ABIS is set if the region contains a call with + function_abi identifier ID. + + * MASK contains all the registers that are fully or partially + clobbered by calls in the region. + + This is not quite as accurate as testing each individual call, + but it's a close and conservatively-correct approximation. + It's much better for some targets than: + + overlaps_hard_reg_set_p (MASK, MODE, REGNO). */ + +inline bool +call_clobbered_in_region_p (unsigned int abis, const_hard_reg_set mask, + machine_mode mode, unsigned int regno) +{ + HARD_REG_SET clobbers = call_clobbers_in_region (abis, mask, mode); + return overlaps_hard_reg_set_p (clobbers, mode, regno); +} + +extern const predefined_function_abi &fntype_abi (const_tree); +extern function_abi fndecl_abi (const_tree); +extern function_abi insn_callee_abi (const rtx_insn *); +extern function_abi expr_callee_abi (const_tree); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/function.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/function.h new file mode 100644 index 0000000..0986137 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/function.h @@ -0,0 +1,722 @@ +/* Structure for saving state for a nested function. + Copyright (C) 1989-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_FUNCTION_H +#define GCC_FUNCTION_H + + +/* Stack of pending (incomplete) sequences saved by `start_sequence'. + Each element describes one pending sequence. + The main insn-chain is saved in the last element of the chain, + unless the chain is empty. */ + +struct GTY(()) sequence_stack { + /* First and last insns in the chain of the saved sequence. */ + rtx_insn *first; + rtx_insn *last; + struct sequence_stack *next; +}; + +struct GTY(()) emit_status { + void ensure_regno_capacity (); + + /* This is reset to LAST_VIRTUAL_REGISTER + 1 at the start of each function. + After rtl generation, it is 1 plus the largest register number used. */ + int x_reg_rtx_no; + + /* Lowest label number in current function. */ + int x_first_label_num; + + /* seq.first and seq.last are the ends of the doubly-linked chain of + rtl for the current function. Both are reset to null at the + start of rtl generation for the function. + + start_sequence saves both of these on seq.next and then starts + a new, nested sequence of insns. + + seq.next is a stack of pending (incomplete) sequences saved by + start_sequence. Each element describes one pending sequence. + The main insn-chain is the last element of the chain. */ + struct sequence_stack seq; + + /* INSN_UID for next insn emitted. + Reset to 1 for each function compiled. */ + int x_cur_insn_uid; + + /* INSN_UID for next debug insn emitted. 
Only used if + --param min-nondebug-insn-uid= is given with nonzero value. */ + int x_cur_debug_insn_uid; + + /* The length of the regno_pointer_align, regno_decl, and x_regno_reg_rtx + vectors. Since these vectors are needed during the expansion phase when + the total number of registers in the function is not yet known, the + vectors are copied and made bigger when necessary. */ + int regno_pointer_align_length; + + /* Indexed by pseudo register number, if nonzero gives the known alignment + for that pseudo (if REG_POINTER is set in x_regno_reg_rtx). + Allocated in parallel with x_regno_reg_rtx. */ + unsigned char * GTY((skip)) regno_pointer_align; +}; + + +/* Indexed by register number, gives an rtx for that register (and only + that register). For pseudo registers, it is the unique rtx for + that pseudo. For hard registers, it is an rtx of the mode specified + by reg_raw_mode. + + FIXME: We could put it into emit_status struct, but gengtype is not + able to deal with length attribute nested in top level structures. */ + +extern GTY ((length ("crtl->emit.x_reg_rtx_no"))) rtx * regno_reg_rtx; + +/* For backward compatibility... eventually these should all go away. */ +#define reg_rtx_no (crtl->emit.x_reg_rtx_no) + +#define REGNO_POINTER_ALIGN(REGNO) (crtl->emit.regno_pointer_align[REGNO]) + +struct GTY(()) expr_status { + /* Number of units that we should eventually pop off the stack. + These are the arguments to function calls that have already returned. */ + poly_int64_pod x_pending_stack_adjust; + + /* Under some ABIs, it is the caller's responsibility to pop arguments + pushed for function calls. A naive implementation would simply pop + the arguments immediately after each call. However, if several + function calls are made in a row, it is typically cheaper to pop + all the arguments after all of the calls are complete since a + single pop instruction can be used. Therefore, GCC attempts to + defer popping the arguments until absolutely necessary. 
(For + example, at the end of a conditional, the arguments must be popped, + since code outside the conditional won't know whether or not the + arguments need to be popped.) + + When INHIBIT_DEFER_POP is nonzero, however, the compiler does not + attempt to defer pops. Instead, the stack is popped immediately + after each call. Rather then setting this variable directly, use + NO_DEFER_POP and OK_DEFER_POP. */ + int x_inhibit_defer_pop; + + /* If PREFERRED_STACK_BOUNDARY and PUSH_ROUNDING are defined, the stack + boundary can be momentarily unaligned while pushing the arguments. + Record the delta since last aligned boundary here in order to get + stack alignment in the nested function calls working right. */ + poly_int64_pod x_stack_pointer_delta; + + /* Nonzero means __builtin_saveregs has already been done in this function. + The value is the pseudoreg containing the value __builtin_saveregs + returned. */ + rtx x_saveregs_value; + + /* Similarly for __builtin_apply_args. */ + rtx x_apply_args_value; + + /* List of labels that must never be deleted. */ + vec *x_forced_labels; +}; + +typedef struct call_site_record_d *call_site_record; + +/* RTL representation of exception handling. */ +struct GTY(()) rtl_eh { + rtx ehr_stackadj; + rtx ehr_handler; + rtx_code_label *ehr_label; + + rtx sjlj_fc; + rtx_insn *sjlj_exit_after; + + vec *action_record_data; + + vec *call_site_record_v[2]; +}; + +#define pending_stack_adjust (crtl->expr.x_pending_stack_adjust) +#define inhibit_defer_pop (crtl->expr.x_inhibit_defer_pop) +#define saveregs_value (crtl->expr.x_saveregs_value) +#define apply_args_value (crtl->expr.x_apply_args_value) +#define forced_labels (crtl->expr.x_forced_labels) +#define stack_pointer_delta (crtl->expr.x_stack_pointer_delta) + +struct gimple_df; +struct call_site_record_d; +struct dw_fde_node; +class range_query; + +struct GTY(()) varasm_status { + /* If we're using a per-function constant pool, this is it. 
*/ + struct rtx_constant_pool *pool; + + /* Number of tree-constants deferred during the expansion of this + function. */ + unsigned int deferred_constants; +}; + + +/* Data for function partitioning. */ +struct GTY(()) function_subsections { + /* Assembly labels for the hot and cold text sections, to + be used by debugger functions for determining the size of text + sections. */ + + const char *hot_section_label; + const char *cold_section_label; + const char *hot_section_end_label; + const char *cold_section_end_label; +}; + +/* Describe an empty area of space in the stack frame. These can be chained + into a list; this is used to keep track of space wasted for alignment + reasons. */ +class GTY(()) frame_space +{ +public: + class frame_space *next; + + poly_int64 start; + poly_int64 length; +}; + +/* Describe emitted calls for -fcallgraph-info. */ +struct GTY(()) callinfo_callee +{ + location_t location; + tree decl; +}; + +/* Describe dynamic allocation for -fcallgraph-info=da. */ +struct GTY(()) callinfo_dalloc +{ + location_t location; + char const *name; +}; + +class GTY(()) stack_usage +{ +public: + /* # of bytes of static stack space allocated by the function. */ + HOST_WIDE_INT static_stack_size; + + /* # of bytes of dynamic stack space allocated by the function. This is + meaningful only if has_unbounded_dynamic_stack_size is zero. */ + HOST_WIDE_INT dynamic_stack_size; + + /* Upper bound on the number of bytes pushed onto the stack after the + prologue. If !ACCUMULATE_OUTGOING_ARGS, it contains the outgoing + arguments. */ + poly_int64 pushed_stack_size; + + /* Nonzero if the amount of stack space allocated dynamically cannot + be bounded at compile-time. */ + unsigned int has_unbounded_dynamic_stack_size : 1; + + /* Functions called within the function, if callgraph is enabled. */ + vec *callees; + + /* Dynamic allocations encountered within the function, if callgraph + da is enabled. 
*/ + vec *dallocs; +}; + +#define current_function_static_stack_size (cfun->su->static_stack_size) +#define current_function_dynamic_stack_size (cfun->su->dynamic_stack_size) +#define current_function_pushed_stack_size (cfun->su->pushed_stack_size) +#define current_function_has_unbounded_dynamic_stack_size \ + (cfun->su->has_unbounded_dynamic_stack_size) +#define current_function_allocates_dynamic_stack_space \ + (current_function_dynamic_stack_size != 0 \ + || current_function_has_unbounded_dynamic_stack_size) + +/* This structure can save all the important global and static variables + describing the status of the current function. */ + +struct GTY(()) function { + struct eh_status *eh; + + /* The control flow graph for this function. */ + struct control_flow_graph *cfg; + + /* GIMPLE body for this function. */ + gimple_seq gimple_body; + + /* SSA and dataflow information. */ + struct gimple_df *gimple_df; + + /* The loops in this function. */ + struct loops *x_current_loops; + + /* Filled by the GIMPLE and RTL FEs, pass to start compilation with. */ + char *pass_startwith; + + /* The stack usage of this function. */ + class stack_usage *su; + + /* Value histograms attached to particular statements. */ + htab_t GTY((skip)) value_histograms; + + /* For function.cc. */ + + /* Points to the FUNCTION_DECL of this function. */ + tree decl; + + /* A PARM_DECL that should contain the static chain for this function. + It will be initialized at the beginning of the function. */ + tree static_chain_decl; + + /* An expression that contains the non-local goto save area. The first + word is the saved frame pointer and the second is the saved stack + pointer. */ + tree nonlocal_goto_save_area; + + /* Vector of function local variables, functions, types and constants. */ + vec *local_decls; + + /* For md files. */ + + /* tm.h can use this to store whatever it likes. 
*/ + struct machine_function * GTY ((maybe_undef)) machine; + + /* Language-specific code can use this to store whatever it likes. */ + struct language_function * language; + + /* Used types hash table. */ + hash_set *GTY (()) used_types_hash; + + /* Dwarf2 Frame Description Entry, containing the Call Frame Instructions + used for unwinding. Only set when either dwarf2 unwinding or dwarf2 + debugging is enabled. */ + struct dw_fde_node *fde; + + /* Range query mechanism for functions. The default is to pick up + global ranges. If a pass wants on-demand ranges OTOH, it must + call enable/disable_ranger(). The pointer is never null. It + should be queried by calling get_range_query(). */ + range_query * GTY ((skip)) x_range_query; + + /* Last statement uid. */ + int last_stmt_uid; + + /* Debug marker counter. Count begin stmt markers. We don't have + to keep it exact, it's more of a rough estimate to enable us to + decide whether they are too many to copy during inlining, or when + expanding to RTL. */ + int debug_marker_count; + + /* Function sequence number for profiling, debugging, etc. */ + int funcdef_no; + + /* Line number of the start of the function for debugging purposes. */ + location_t function_start_locus; + + /* Line number of the end of the function. */ + location_t function_end_locus; + + /* Properties used by the pass manager. */ + unsigned int curr_properties; + unsigned int last_verified; + + /* Different from normal TODO_flags which are handled right at the + beginning or the end of one pass execution, the pending_TODOs + are passed down in the pipeline until one of its consumers can + perform the requested action. Consumers should then clear the + flags for the actions that they have taken. */ + unsigned int pending_TODOs; + + /* Non-null if the function does something that would prevent it from + being copied; this applies to both versioning and inlining. Set to + a string describing the reason for failure. 
*/ + const char * GTY((skip)) cannot_be_copied_reason; + + /* Last assigned dependence info clique. */ + unsigned short last_clique; + + /* Collected bit flags. */ + + /* Number of units of general registers that need saving in stdarg + function. What unit is depends on the backend, either it is number + of bytes, or it can be number of registers. */ + unsigned int va_list_gpr_size : 8; + + /* Number of units of floating point registers that need saving in stdarg + function. */ + unsigned int va_list_fpr_size : 8; + + /* Nonzero if function being compiled can call setjmp. */ + unsigned int calls_setjmp : 1; + + /* Nonzero if function being compiled can call alloca, + either as a subroutine or builtin. */ + unsigned int calls_alloca : 1; + + /* Nonzero if function being compiled can call __builtin_eh_return. */ + unsigned int calls_eh_return : 1; + + /* Nonzero if function being compiled receives nonlocal gotos + from nested functions. */ + unsigned int has_nonlocal_label : 1; + + /* Nonzero if function being compiled has a forced label + placed into static storage. */ + unsigned int has_forced_label_in_static : 1; + + /* Nonzero if we've set cannot_be_copied_reason. I.e. if + (cannot_be_copied_set && !cannot_be_copied_reason), the function + can in fact be copied. */ + unsigned int cannot_be_copied_set : 1; + + /* Nonzero if current function uses stdarg.h or equivalent. */ + unsigned int stdarg : 1; + + unsigned int after_inlining : 1; + unsigned int always_inline_functions_inlined : 1; + + /* Nonzero if function being compiled can throw synchronous non-call + exceptions. */ + unsigned int can_throw_non_call_exceptions : 1; + + /* Nonzero if instructions that may throw exceptions but don't otherwise + contribute to the execution of the program can be deleted. */ + unsigned int can_delete_dead_exceptions : 1; + + /* Fields below this point are not set for abstract functions; see + allocate_struct_function. 
*/ + + /* Nonzero if function being compiled needs to be given an address + where the value should be stored. */ + unsigned int returns_struct : 1; + + /* Nonzero if function being compiled needs to + return the address of where it has put a structure value. */ + unsigned int returns_pcc_struct : 1; + + /* Nonzero if this function has local DECL_HARD_REGISTER variables. + In this case code motion has to be done more carefully. */ + unsigned int has_local_explicit_reg_vars : 1; + + /* Nonzero if the current function is a thunk, i.e., a lightweight + function implemented by the output_mi_thunk hook) that just + adjusts one of its arguments and forwards to another + function. */ + unsigned int is_thunk : 1; + + /* Nonzero if the current function contains any loops with + loop->force_vectorize set. */ + unsigned int has_force_vectorize_loops : 1; + + /* Nonzero if the current function contains any loops with + nonzero value in loop->simduid. */ + unsigned int has_simduid_loops : 1; + + /* Nonzero when the tail call has been identified. */ + unsigned int tail_call_marked : 1; + + /* Nonzero if the current function contains a #pragma GCC unroll. */ + unsigned int has_unroll : 1; + + /* Set when the function was compiled with generation of debug + (begin stmt, inline entry, ...) markers enabled. */ + unsigned int debug_nonbind_markers : 1; + + /* Set if this is a coroutine-related function. */ + unsigned int coroutine_component : 1; + + /* Set if there are any OMP_TARGET regions in the function. */ + unsigned int has_omp_target : 1; +}; + +/* Add the decl D to the local_decls list of FUN. */ + +void add_local_decl (struct function *fun, tree d); + +#define FOR_EACH_LOCAL_DECL(FUN, I, D) \ + FOR_EACH_VEC_SAFE_ELT_REVERSE ((FUN)->local_decls, I, D) + +/* Record a final call to CALLEE at LOCATION. */ +void record_final_call (tree callee, location_t location); + +/* Record a dynamic allocation made for DECL_OR_EXP. 
*/ +void record_dynamic_alloc (tree decl_or_exp); + +/* If va_list_[gf]pr_size is set to this, it means we don't know how + many units need to be saved. */ +#define VA_LIST_MAX_GPR_SIZE 255 +#define VA_LIST_MAX_FPR_SIZE 255 + +/* The function currently being compiled. */ +extern GTY(()) struct function *cfun; + +/* In order to ensure that cfun is not set directly, we redefine it so + that it is not an lvalue. Rather than assign to cfun, use + push_cfun or set_cfun. */ +#define cfun (cfun + 0) + +/* Nonzero if we've already converted virtual regs to hard regs. */ +extern int virtuals_instantiated; + +/* Nonzero if at least one trampoline has been created. */ +extern int trampolines_created; + +struct GTY((for_user)) types_used_by_vars_entry { + tree type; + tree var_decl; +}; + +struct used_type_hasher : ggc_ptr_hash +{ + static hashval_t hash (types_used_by_vars_entry *); + static bool equal (types_used_by_vars_entry *, types_used_by_vars_entry *); +}; + +/* Hash table making the relationship between a global variable + and the types it references in its initializer. The key of the + entry is a referenced type, and the value is the DECL of the global + variable. types_use_by_vars_do_hash and types_used_by_vars_eq below are + the hash and equality functions to use for this hash table. */ +extern GTY(()) hash_table *types_used_by_vars_hash; + +void types_used_by_var_decl_insert (tree type, tree var_decl); + +/* During parsing of a global variable, this vector contains the types + referenced by the global variable. */ +extern GTY(()) vec *types_used_by_cur_var_decl; + + +/* Return the loop tree of FN. */ + +inline struct loops * +loops_for_fn (struct function *fn) +{ + return fn->x_current_loops; +} + +/* Set the loop tree of FN to LOOPS. */ + +inline void +set_loops_for_fn (struct function *fn, struct loops *loops) +{ + gcc_checking_assert (fn->x_current_loops == NULL || loops == NULL); + fn->x_current_loops = loops; +} + +/* For backward compatibility... 
eventually these should all go away. */ +#define current_function_funcdef_no (cfun->funcdef_no) + +#define current_loops (cfun->x_current_loops) +#define dom_computed (cfun->cfg->x_dom_computed) +#define n_bbs_in_dom_tree (cfun->cfg->x_n_bbs_in_dom_tree) +#define VALUE_HISTOGRAMS(fun) (fun)->value_histograms + +/* A pointer to a function to create target specific, per-function + data structures. */ +extern struct machine_function * (*init_machine_status) (void); + +/* Structure to record the size of a sequence of arguments + as the sum of a tree-expression and a constant. This structure is + also used to store offsets from the stack, which might be negative, + so the variable part must be ssizetype, not sizetype. */ + +struct args_size +{ + poly_int64_pod constant; + tree var; +}; + +/* Package up various arg related fields of struct args for + locate_and_pad_parm. */ +struct locate_and_pad_arg_data +{ + /* Size of this argument on the stack, rounded up for any padding it + gets. If REG_PARM_STACK_SPACE is defined, then register parms are + counted here, otherwise they aren't. */ + struct args_size size; + /* Offset of this argument from beginning of stack-args. */ + struct args_size offset; + /* Offset to the start of the stack slot. Different from OFFSET + if this arg pads downward. */ + struct args_size slot_offset; + /* The amount that the stack pointer needs to be adjusted to + force alignment for the next argument. */ + struct args_size alignment_pad; + /* Which way we should pad this arg. */ + pad_direction where_pad; + /* slot_offset is at least this aligned. */ + unsigned int boundary; +}; + +/* Add the value of the tree INC to the `struct args_size' TO. 
*/ + +#define ADD_PARM_SIZE(TO, INC) \ +do { \ + tree inc = (INC); \ + if (tree_fits_shwi_p (inc)) \ + (TO).constant += tree_to_shwi (inc); \ + else if ((TO).var == 0) \ + (TO).var = fold_convert (ssizetype, inc); \ + else \ + (TO).var = size_binop (PLUS_EXPR, (TO).var, \ + fold_convert (ssizetype, inc)); \ +} while (0) + +#define SUB_PARM_SIZE(TO, DEC) \ +do { \ + tree dec = (DEC); \ + if (tree_fits_shwi_p (dec)) \ + (TO).constant -= tree_to_shwi (dec); \ + else if ((TO).var == 0) \ + (TO).var = size_binop (MINUS_EXPR, ssize_int (0), \ + fold_convert (ssizetype, dec)); \ + else \ + (TO).var = size_binop (MINUS_EXPR, (TO).var, \ + fold_convert (ssizetype, dec)); \ +} while (0) + +/* Convert the implicit sum in a `struct args_size' into a tree + of type ssizetype. */ +#define ARGS_SIZE_TREE(SIZE) \ +((SIZE).var == 0 ? ssize_int ((SIZE).constant) \ + : size_binop (PLUS_EXPR, fold_convert (ssizetype, (SIZE).var), \ + ssize_int ((SIZE).constant))) + +/* Convert the implicit sum in a `struct args_size' into an rtx. */ +#define ARGS_SIZE_RTX(SIZE) \ +((SIZE).var == 0 ? gen_int_mode ((SIZE).constant, Pmode) \ + : expand_normal (ARGS_SIZE_TREE (SIZE))) + +#define ASLK_REDUCE_ALIGN 1 +#define ASLK_RECORD_PAD 2 + +/* If pointers to member functions use the least significant bit to + indicate whether a function is virtual, ensure a pointer + to this function will have that bit clear. */ +#define MINIMUM_METHOD_BOUNDARY \ + ((TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_pfn) \ + ? MAX (FUNCTION_BOUNDARY, 2 * BITS_PER_UNIT) : FUNCTION_BOUNDARY) + +enum stack_clash_probes { + NO_PROBE_NO_FRAME, + NO_PROBE_SMALL_FRAME, + PROBE_INLINE, + PROBE_LOOP +}; + +extern void dump_stack_clash_frame_info (enum stack_clash_probes, bool); + + +extern void push_function_context (void); +extern void pop_function_context (void); + +/* Save and restore status information for a nested function. 
*/ +extern void free_after_parsing (struct function *); +extern void free_after_compilation (struct function *); + +/* Return size needed for stack frame based on slots so far allocated. + This size counts from zero. It is not rounded to STACK_BOUNDARY; + the caller may have to do that. */ +extern poly_int64 get_frame_size (void); + +/* Issue an error message and return TRUE if frame OFFSET overflows in + the signed target pointer arithmetics for function FUNC. Otherwise + return FALSE. */ +extern bool frame_offset_overflow (poly_int64, tree); + +extern unsigned int spill_slot_alignment (machine_mode); + +extern rtx assign_stack_local_1 (machine_mode, poly_int64, int, int); +extern rtx assign_stack_local (machine_mode, poly_int64, int); +extern rtx assign_stack_temp_for_type (machine_mode, poly_int64, tree); +extern rtx assign_stack_temp (machine_mode, poly_int64); +extern rtx assign_temp (tree, int, int); +extern void update_temp_slot_address (rtx, rtx); +extern void preserve_temp_slots (rtx); +extern void free_temp_slots (void); +extern void push_temp_slots (void); +extern void pop_temp_slots (void); +extern void init_temp_slots (void); +extern rtx get_hard_reg_initial_reg (rtx); +extern rtx get_hard_reg_initial_val (machine_mode, unsigned int); +extern rtx has_hard_reg_initial_val (machine_mode, unsigned int); + +/* Called from gimple_expand_cfg. */ +extern unsigned int emit_initial_value_sets (void); + +extern bool initial_value_entry (int i, rtx *, rtx *); +extern void instantiate_decl_rtl (rtx x); +extern int aggregate_value_p (const_tree, const_tree); +extern bool use_register_for_decl (const_tree); +extern gimple_seq gimplify_parameters (gimple_seq *); +extern void locate_and_pad_parm (machine_mode, tree, int, int, int, + tree, struct args_size *, + struct locate_and_pad_arg_data *); +extern void generate_setjmp_warnings (void); + +/* Identify BLOCKs referenced by more than one NOTE_INSN_BLOCK_{BEG,END}, + and create duplicate blocks. 
*/ +extern void reorder_blocks (void); +extern void clear_block_marks (tree); +extern tree blocks_nreverse (tree); +extern tree block_chainon (tree, tree); + +/* Set BLOCK_NUMBER for all the blocks in FN. */ +extern void number_blocks (tree); + +/* cfun shouldn't be set directly; use one of these functions instead. */ +extern void set_cfun (struct function *new_cfun, bool force = false); +extern void push_cfun (struct function *new_cfun); +extern void pop_cfun (void); + +extern int get_next_funcdef_no (void); +extern int get_last_funcdef_no (void); +extern void allocate_struct_function (tree, bool); +extern void push_struct_function (tree fndecl); +extern void push_dummy_function (bool); +extern void pop_dummy_function (void); +extern void init_dummy_function_start (void); +extern void init_function_start (tree); +extern void stack_protect_epilogue (void); +extern void expand_function_start (tree); +extern void expand_dummy_function_end (void); + +extern void thread_prologue_and_epilogue_insns (void); +extern void diddle_return_value (void (*)(rtx, void*), void*); +extern void clobber_return_register (void); +extern void expand_function_end (void); +extern rtx get_arg_pointer_save_area (void); +extern void maybe_copy_prologue_epilogue_insn (rtx, rtx); +extern int prologue_contains (const rtx_insn *); +extern int epilogue_contains (const rtx_insn *); +extern int prologue_epilogue_contains (const rtx_insn *); +extern void record_prologue_seq (rtx_insn *); +extern void record_epilogue_seq (rtx_insn *); +extern void emit_return_into_block (bool simple_p, basic_block bb); +extern void set_return_jump_label (rtx_insn *); +extern bool active_insn_between (rtx_insn *head, rtx_insn *tail); +extern vec convert_jumps_to_returns (basic_block last_bb, bool simple_p, + vec unconverted); +extern basic_block emit_return_for_exit (edge exit_fallthru_edge, + bool simple_p); +extern void reposition_prologue_and_epilogue_notes (void); + +/* Returns the name of the current function. 
*/ +extern const char *fndecl_name (tree); +extern const char *function_name (struct function *); +extern const char *current_function_name (void); + +extern void used_types_insert (tree); + +#endif /* GCC_FUNCTION_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-plugin.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-plugin.h new file mode 100644 index 0000000..ee9aa86 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-plugin.h @@ -0,0 +1,47 @@ +/* Public header file for plugins to include. + Copyright (C) 2009-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_PLUGIN_H +#define GCC_PLUGIN_H + +#ifndef IN_GCC +#define IN_GCC +#endif + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "backend.h" +#include "cfghooks.h" +#include "hard-reg-set.h" +#include "cfgrtl.h" +#include "cfganal.h" +#include "lcm.h" +#include "cfgbuild.h" +#include "cfgcleanup.h" +#include "plugin-api.h" +#include "ipa-ref.h" +#include "alias.h" +#include "flags.h" +#include "tree-core.h" +#include "fold-const.h" +#include "tree-check.h" +#include "plugin.h" + +#endif /* GCC_PLUGIN_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-rich-location.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-rich-location.h new file mode 100644 index 0000000..a43fe38 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-rich-location.h @@ -0,0 +1,226 @@ +/* Declarations relating to class gcc_rich_location + Copyright (C) 2014-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_RICH_LOCATION_H +#define GCC_RICH_LOCATION_H + +/* A gcc_rich_location is libcpp's rich_location with additional + helper methods for working with gcc's types. The class is not + copyable or assignable because rich_location isn't. */ + +class gcc_rich_location : public rich_location +{ + public: + /* Constructors. */ + + /* Constructing from a location. 
*/ + explicit gcc_rich_location (location_t loc, const range_label *label = NULL) + : rich_location (line_table, loc, label) + { + } + + /* Methods for adding ranges via gcc entities. */ + void + add_expr (tree expr, range_label *label); + + void + maybe_add_expr (tree t, range_label *label); + + void add_fixit_misspelled_id (location_t misspelled_token_loc, + tree hint_id); + + /* If LOC is within the spans of lines that will already be printed for + this gcc_rich_location, then add it as a secondary location + and return true. + + Otherwise return false. + + This allows for a diagnostic to compactly print secondary locations + in one diagnostic when these are near enough the primary locations for + diagnostics-show-locus.c to cope with them, and to fall back to + printing them via a note otherwise e.g.: + + gcc_rich_location richloc (primary_loc); + bool added secondary = richloc.add_location_if_nearby (secondary_loc); + error_at (&richloc, "main message"); + if (!added secondary) + inform (secondary_loc, "message for secondary"); + + Implemented in diagnostic-show-locus.cc. */ + + bool add_location_if_nearby (location_t loc, + bool restrict_to_current_line_spans = true, + const range_label *label = NULL); + + /* Add a fix-it hint suggesting the insertion of CONTENT before + INSERTION_POINT. + + Attempt to handle formatting: if INSERTION_POINT is the first thing on + its line, and INDENT is sufficiently sane, then add CONTENT on its own + line, using the indentation of INDENT. + Otherwise, add CONTENT directly before INSERTION_POINT. 
+ + For example, adding "CONTENT;" with the closing brace as the insertion + point and using "INDENT;" for indentation: + + if () + { + INDENT; + } + + would lead to: + + if () + { + INDENT; + CONTENT; + } + + but adding it to: + + if () {INDENT;} + + would lead to: + + if () {INDENT;CONTENT;} + */ + void add_fixit_insert_formatted (const char *content, + location_t insertion_point, + location_t indent); +}; + +/* Concrete subclass of libcpp's range_label. + Simple implementation using a string literal. */ + +class text_range_label : public range_label +{ + public: + text_range_label (const char *text) : m_text (text) {} + + label_text get_text (unsigned /*range_idx*/) const FINAL OVERRIDE + { + return label_text::borrow (m_text); + } + + private: + const char *m_text; +}; + +/* Concrete subclass of libcpp's range_label for use in + diagnostics involving mismatched types. + + Each frontend that uses this should supply its own implementation. + + Generate a label describing LABELLED_TYPE. The frontend may use + OTHER_TYPE where appropriate for highlighting the differences between + the two types (analogous to C++'s use of %H and %I with + template types). + + Either or both of LABELLED_TYPE and OTHER_TYPE may be NULL_TREE. + If LABELLED_TYPE is NULL_TREE, then there is no label. + + For example, this rich_location could use two instances of + range_label_for_type_mismatch: + + printf ("arg0: %i arg1: %s arg2: %i", + ^~ + | + const char * + 100, 101, 102); + ~~~ + | + int + + (a) the label for "%s" with LABELLED_TYPE for "const char*" and + (b) the label for "101" with LABELLED TYPE for "int" + where each one uses the other's type as OTHER_TYPE. 
*/ + +class range_label_for_type_mismatch : public range_label +{ + public: + range_label_for_type_mismatch (tree labelled_type, tree other_type) + : m_labelled_type (labelled_type), m_other_type (other_type) + { + } + + label_text get_text (unsigned range_idx) const OVERRIDE; + + protected: + tree m_labelled_type; + tree m_other_type; +}; + +/* Subclass of range_label for labelling the type of EXPR when reporting + a type mismatch between EXPR and OTHER_EXPR. + Either or both of EXPR and OTHER_EXPR could be NULL. */ + +class maybe_range_label_for_tree_type_mismatch : public range_label +{ + public: + maybe_range_label_for_tree_type_mismatch (tree expr, tree other_expr) + : m_expr (expr), m_other_expr (other_expr) + { + } + + label_text get_text (unsigned range_idx) const FINAL OVERRIDE; + + private: + tree m_expr; + tree m_other_expr; +}; + +class op_location_t; + +/* A subclass of rich_location for showing problems with binary operations. + + If enough location information is available, the ctor will make a + 3-location rich_location of the form: + + arg_0 op arg_1 + ~~~~~ ^~ ~~~~~ + | | + | arg1 type + arg0 type + + labelling the types of the arguments if SHOW_TYPES is true. + + Otherwise, it will fall back to a 1-location rich_location using the + compound location within LOC: + + arg_0 op arg_1 + ~~~~~~^~~~~~~~ + + for which we can't label the types. 
*/ + +class binary_op_rich_location : public gcc_rich_location +{ + public: + binary_op_rich_location (const op_location_t &loc, + tree arg0, tree arg1, + bool show_types); + + private: + static bool use_operator_loc_p (const op_location_t &loc, + tree arg0, tree arg1); + + maybe_range_label_for_tree_type_mismatch m_label_for_arg0; + maybe_range_label_for_tree_type_mismatch m_label_for_arg1; +}; + +#endif /* GCC_RICH_LOCATION_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-symtab.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-symtab.h new file mode 100644 index 0000000..feb1033 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc-symtab.h @@ -0,0 +1,28 @@ +/* Declarations for symtab.cc. + FIXME - This file should be named symtab.h, but that name conflicts + with libcpp's symtab.h. + + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_SYMTAB_H +#define GCC_SYMTAB_H + +extern void change_decl_assembler_name (tree, tree); + +#endif // GCC_SYMTAB_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc.h new file mode 100644 index 0000000..63231dd --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcc.h @@ -0,0 +1,100 @@ +/* Header file for modules that link with gcc.cc + Copyright (C) 1999-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GCC_H +#define GCC_GCC_H + +#include "version.h" +#include "diagnostic-core.h" + +/* The top-level "main" within the driver would be ~1000 lines long. + This class breaks it up into smaller functions and contains some + state shared by them. 
*/ + +class driver +{ + public: + driver (bool can_finalize, bool debug); + ~driver (); + int main (int argc, char **argv); + void finalize (); + + private: + void set_progname (const char *argv0) const; + void expand_at_files (int *argc, char ***argv) const; + void decode_argv (int argc, const char **argv); + void global_initializations (); + void build_multilib_strings () const; + void set_up_specs () const; + void putenv_COLLECT_GCC (const char *argv0) const; + void maybe_putenv_COLLECT_LTO_WRAPPER () const; + void maybe_putenv_OFFLOAD_TARGETS () const; + void handle_unrecognized_options (); + int maybe_print_and_exit () const; + bool prepare_infiles (); + void do_spec_on_infiles () const; + void maybe_run_linker (const char *argv0) const; + void final_actions () const; + void detect_jobserver () const; + int get_exit_code () const; + + private: + char *explicit_link_files; + struct cl_decoded_option *decoded_options; + unsigned int decoded_options_count; + option_proposer m_option_proposer; +}; + +/* The mapping of a spec function name to the C function that + implements it. */ +struct spec_function +{ + const char *name; + const char *(*func) (int, const char **); +}; + +/* These are exported by gcc.cc. */ +extern int do_spec (const char *); +extern void record_temp_file (const char *, int, int); +extern void set_input (const char *); + +/* Spec files linked with gcc.cc must provide definitions for these. */ + +/* Called before processing to change/add/remove arguments. */ +extern void lang_specific_driver (struct cl_decoded_option **, + unsigned int *, int *); + +/* Called before linking. Returns 0 on success and -1 on failure. */ +extern int lang_specific_pre_link (void); + +extern int n_infiles; + +/* Number of extra output files that lang_specific_pre_link may generate. */ +extern int lang_specific_extra_outfiles; + +/* A vector of corresponding output files is made up later. 
*/ + +extern const char **outfiles; + +extern void +driver_get_configure_time_options (void (*cb)(const char *option, + void *user_data), + void *user_data); + +#endif /* ! GCC_GCC_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcov-counter.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcov-counter.def new file mode 100644 index 0000000..6d2182b --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcov-counter.def @@ -0,0 +1,51 @@ +/* Definitions for the gcov counters in the GNU compiler. + Copyright (C) 2001-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* Before including this file, define a macro: + + DEF_GCOV_COUNTER(COUNTER, NAME, FN_TYPE) + + This macro will be expanded to all supported gcov counters, their + names, or the type of handler functions. FN_TYPE will be + expanded to a handler function, like in gcov_merge, it is + expanded to __gcov_merge ## FN_TYPE. */ + +/* Arc transitions. */ +DEF_GCOV_COUNTER(GCOV_COUNTER_ARCS, "arcs", _add) + +/* Histogram of value inside an interval. */ +DEF_GCOV_COUNTER(GCOV_COUNTER_V_INTERVAL, "interval", _add) + +/* Histogram of exact power2 logarithm of a value. */ +DEF_GCOV_COUNTER(GCOV_COUNTER_V_POW2, "pow2", _add) + +/* The most common value of expression. */ +DEF_GCOV_COUNTER(GCOV_COUNTER_V_TOPN, "topn", _topn) + +/* The most common indirect address. 
*/ +DEF_GCOV_COUNTER(GCOV_COUNTER_V_INDIR, "indirect_call", _topn) + +/* Compute average value passed to the counter. */ +DEF_GCOV_COUNTER(GCOV_COUNTER_AVERAGE, "average", _add) + +/* IOR of the all values passed to counter. */ +DEF_GCOV_COUNTER(GCOV_COUNTER_IOR, "ior", _ior) + +/* Time profile collecting first run of a function */ +DEF_GCOV_COUNTER(GCOV_TIME_PROFILER, "time_profiler", _time_profile) diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcov-io.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcov-io.h new file mode 100644 index 0000000..99ce7db --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcov-io.h @@ -0,0 +1,389 @@ +/* File format for coverage information + Copyright (C) 1996-2022 Free Software Foundation, Inc. + Contributed by Bob Manson . + Completely remangled by Nathan Sidwell . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + + +/* CAVEAT: Coverage information files should not be parsed directly, + instead use `gcov --json-format`, which provides + machine-readable coverage information. + + Note that the following file format documentation might be outdated. 
+ + Coverage information is held in two files. A notes file, which is + generated by the compiler, and a data file, which is generated by + the program under test. Both files use a similar structure. We do + not attempt to make these files backwards compatible with previous + versions, as you only need coverage information when developing a + program. We do hold version information, so that mismatches can be + detected, and we use a format that allows tools to skip information + they do not understand or are not interested in. + + Numbers are recorded in the 32 bit unsigned binary form of the + endianness of the machine generating the file. 64 bit numbers are + stored as two 32 bit numbers, the low part first. + The number of bytes is stored, followed by the + string. Zero length and NULL strings are simply stored as a length + of zero (they have no trailing NUL). + + int32: byte3 byte2 byte1 byte0 | byte0 byte1 byte2 byte3 + int64: int32:low int32:high + string: int32:0 | int32:length char* char:0 + item: int32 | int64 | string + + The basic format of the notes file is + + file : int32:magic int32:version int32:stamp int32:support_unexecuted_blocks record* + + The basic format of the data file is + + file : int32:magic int32:version int32:stamp record* + + The magic ident is different for the notes and the data files. The + magic ident is used to determine the endianness of the file, when + reading. The version is the same for both files and is derived + from gcc's version number. The stamp value is used to synchronize + note and data files and to synchronize merging within a data + file. It need not be an absolute time stamp, merely a ticker that + increments fast enough and cycles slow enough to distinguish + different compile/run/compile cycles. + + Although the ident and version are formally 32 bit numbers, they + are derived from 4 character ASCII strings. 
The version number + consists of a two character major version number + (first digit starts from 'A' letter to not to clash with the older + numbering scheme), the single character minor version number, + and a single character indicating the status of the release. + That will be 'e' experimental, 'p' prerelease and 'r' for release. + Because, by good fortune, these are in alphabetical order, string + collating can be used to compare version strings. Be aware that + the 'e' designation will (naturally) be unstable and might be + incompatible with itself. For gcc 17.0 experimental, it would be + 'B70e' (0x42373065). As we currently do not release more than 5 minor + releases, the single character should be always fine. Major number + is currently changed roughly every year, which gives us space + for next 250 years (maximum allowed number would be 259.9). + + A record has a tag, length and variable amount of data. + + record: header data + header: int32:tag int32:length + data: item* + + Records are not nested, but there is a record hierarchy. Tag + numbers reflect this hierarchy. Tags are unique across note and + data files. Some record types have a varying amount of data. The + LENGTH is the number of 4bytes that follow and is usually used to + determine how much data. The tag value is split into 4 8-bit + fields, one for each of four possible levels. The most significant + is allocated first. Unused levels are zero. Active levels are + odd-valued, so that the LSB of the level is one. A sub-level + incorporates the values of its superlevels. This formatting allows + you to determine the tag hierarchy, without understanding the tags + themselves, and is similar to the standard section numbering used + in technical documents. Level values [1..3f] are used for common + tags, values [41..9f] for the notes file and [a1..ff] for the data + file. 
+ + The notes file contains the following records + note: unit function-graph* + unit: header int32:checksum string:source + function-graph: announce_function basic_blocks {arcs | lines}* + announce_function: header int32:ident + int32:lineno_checksum int32:cfg_checksum + string:name string:source int32:start_lineno int32:start_column int32:end_lineno + basic_block: header int32:flags* + arcs: header int32:block_no arc* + arc: int32:dest_block int32:flags + lines: header int32:block_no line* + int32:0 string:NULL + line: int32:line_no | int32:0 string:filename + + The BASIC_BLOCK record holds per-bb flags. The number of blocks + can be inferred from its data length. There is one ARCS record per + basic block. The number of arcs from a bb is implicit from the + data length. It enumerates the destination bb and per-arc flags. + There is one LINES record per basic block, it enumerates the source + lines which belong to that basic block. Source file names are + introduced by a line number of 0, following lines are from the new + source file. The initial source file for the function is NULL, but + the current source file should be remembered from one LINES record + to the next. The end of a block is indicated by an empty filename + - this does not reset the current source file. Note there is no + ordering of the ARCS and LINES records: they may be in any order, + interleaved in any manner. The current filename follows the order + the LINES records are stored in the file, *not* the ordering of the + blocks they are for. + + The data file contains the following records. 
+ data: {unit summary:object function-data*}* + unit: header int32:checksum + function-data: announce_function present counts + announce_function: header int32:ident + int32:lineno_checksum int32:cfg_checksum + present: header int32:present + counts: header int64:count* + summary: int32:checksum int32:runs int32:sum_max + + The ANNOUNCE_FUNCTION record is the same as that in the note file, + but without the source location. The COUNTS gives the + counter values for instrumented features. The about the whole + program. The checksum is used for whole program summaries, and + disambiguates different programs which include the same + instrumented object file. There may be several program summaries, + each with a unique checksum. The object summary's checksum is + zero. Note that the data file might contain information from + several runs concatenated, or the data might be merged. + + This file is included by both the compiler, gcov tools and the + runtime support library libgcov. IN_LIBGCOV and IN_GCOV are used to + distinguish which case is which. If IN_LIBGCOV is nonzero, + libgcov is being built. If IN_GCOV is nonzero, the gcov tools are + being built. Otherwise the compiler is being built. IN_GCOV may be + positive or negative. If positive, we are compiling a tool that + requires additional functions (see the code for knowledge of what + those functions are). */ + +#ifndef GCC_GCOV_IO_H +#define GCC_GCOV_IO_H + +/* GCOV key-value pair linked list type. 
*/ + +struct gcov_kvp; + +struct gcov_kvp +{ + gcov_type value; + gcov_type count; + struct gcov_kvp *next; +}; + +#ifndef IN_LIBGCOV +/* About the host */ + +typedef unsigned gcov_unsigned_t; +typedef unsigned gcov_position_t; +/* gcov_type is typedef'd elsewhere for the compiler */ +#if IN_GCOV +#define GCOV_LINKAGE static +typedef int64_t gcov_type; +typedef uint64_t gcov_type_unsigned; +#if IN_GCOV > 0 +#include +#endif +#endif + +#if defined (HOST_HAS_F_SETLKW) +#define GCOV_LOCKED 1 +#else +#define GCOV_LOCKED 0 +#endif + +#if defined (HOST_HAS_LK_LOCK) +#define GCOV_LOCKED_WITH_LOCKING 1 +#else +#define GCOV_LOCKED_WITH_LOCKING 0 +#endif + +#define ATTRIBUTE_HIDDEN + +#endif /* !IN_LIBGCOV */ + +#ifndef GCOV_LINKAGE +#define GCOV_LINKAGE extern +#endif + +#if IN_LIBGCOV +#define gcov_nonruntime_assert(EXPR) ((void)(0 && (EXPR))) +#else +#define gcov_nonruntime_assert(EXPR) gcc_assert (EXPR) +#define gcov_error(...) fatal_error (input_location, __VA_ARGS__) +#endif + +/* File suffixes. */ +#define GCOV_DATA_SUFFIX ".gcda" +#define GCOV_NOTE_SUFFIX ".gcno" + +/* File magic. Must not be palindromes. */ +#define GCOV_DATA_MAGIC ((gcov_unsigned_t)0x67636461) /* "gcda" */ +#define GCOV_NOTE_MAGIC ((gcov_unsigned_t)0x67636e6f) /* "gcno" */ + +#include "version.h" + +/* Convert a magic or version number to a 4 character string. */ +#define GCOV_UNSIGNED2STRING(ARRAY,VALUE) \ + ((ARRAY)[0] = (char)((VALUE) >> 24), \ + (ARRAY)[1] = (char)((VALUE) >> 16), \ + (ARRAY)[2] = (char)((VALUE) >> 8), \ + (ARRAY)[3] = (char)((VALUE) >> 0)) + +/* The record tags. Values [1..3f] are for tags which may be in either + file. Values [41..9f] for those in the note file and [a1..ff] for + the data file. The tag value zero is used as an explicit end of + file marker -- it is not required to be present. + All length values are in bytes. 
*/ + +#define GCOV_WORD_SIZE 4 + +#define GCOV_TAG_FUNCTION ((gcov_unsigned_t)0x01000000) +#define GCOV_TAG_FUNCTION_LENGTH (3 * GCOV_WORD_SIZE) +#define GCOV_TAG_BLOCKS ((gcov_unsigned_t)0x01410000) +#define GCOV_TAG_BLOCKS_LENGTH(NUM) (NUM) +#define GCOV_TAG_ARCS ((gcov_unsigned_t)0x01430000) +#define GCOV_TAG_ARCS_LENGTH(NUM) (1 + (NUM) * 2 * GCOV_WORD_SIZE) +#define GCOV_TAG_ARCS_NUM(LENGTH) (((LENGTH / GCOV_WORD_SIZE) - 1) / 2) +#define GCOV_TAG_LINES ((gcov_unsigned_t)0x01450000) +#define GCOV_TAG_COUNTER_BASE ((gcov_unsigned_t)0x01a10000) +#define GCOV_TAG_COUNTER_LENGTH(NUM) ((NUM) * 2 * GCOV_WORD_SIZE) +#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH / GCOV_WORD_SIZE) / 2) +#define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000) +#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000) /* Obsolete */ +#define GCOV_TAG_SUMMARY_LENGTH (2 * GCOV_WORD_SIZE) +#define GCOV_TAG_AFDO_FILE_NAMES ((gcov_unsigned_t)0xaa000000) +#define GCOV_TAG_AFDO_FUNCTION ((gcov_unsigned_t)0xac000000) +#define GCOV_TAG_AFDO_WORKING_SET ((gcov_unsigned_t)0xaf000000) + + +/* Counters that are collected. */ + +#define DEF_GCOV_COUNTER(COUNTER, NAME, MERGE_FN) COUNTER, +enum { +#include "gcov-counter.def" +GCOV_COUNTERS +}; +#undef DEF_GCOV_COUNTER + +/* The first of counters used for value profiling. They must form a + consecutive interval and their order must match the order of + HIST_TYPEs in value-prof.h. */ +#define GCOV_FIRST_VALUE_COUNTER GCOV_COUNTER_V_INTERVAL + +/* The last of counters used for value profiling. */ +#define GCOV_LAST_VALUE_COUNTER (GCOV_COUNTERS - 1) + +/* Number of counters used for value profiling. */ +#define GCOV_N_VALUE_COUNTERS \ + (GCOV_LAST_VALUE_COUNTER - GCOV_FIRST_VALUE_COUNTER + 1) + +/* Number of top N counters when being in memory. */ +#define GCOV_TOPN_MEM_COUNTERS 3 + +/* Number of top N counters in disk representation. */ +#define GCOV_TOPN_DISK_COUNTERS 2 + +/* Maximum number of tracked TOP N value profiles. 
*/ +#define GCOV_TOPN_MAXIMUM_TRACKED_VALUES 32 + +/* Convert a counter index to a tag. */ +#define GCOV_TAG_FOR_COUNTER(COUNT) \ + (GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17)) +/* Convert a tag to a counter. */ +#define GCOV_COUNTER_FOR_TAG(TAG) \ + ((unsigned)(((TAG) - GCOV_TAG_COUNTER_BASE) >> 17)) +/* Check whether a tag is a counter tag. */ +#define GCOV_TAG_IS_COUNTER(TAG) \ + (!((TAG) & 0xFFFF) && GCOV_COUNTER_FOR_TAG (TAG) < GCOV_COUNTERS) + +/* The tag level mask has 1's in the position of the inner levels, & + the lsb of the current level, and zero on the current and outer + levels. */ +#define GCOV_TAG_MASK(TAG) (((TAG) - 1) ^ (TAG)) + +/* Return nonzero if SUB is an immediate subtag of TAG. */ +#define GCOV_TAG_IS_SUBTAG(TAG,SUB) \ + (GCOV_TAG_MASK (TAG) >> 8 == GCOV_TAG_MASK (SUB) \ + && !(((SUB) ^ (TAG)) & ~GCOV_TAG_MASK (TAG))) + +/* Return nonzero if SUB is at a sublevel to TAG. */ +#define GCOV_TAG_IS_SUBLEVEL(TAG,SUB) \ + (GCOV_TAG_MASK (TAG) > GCOV_TAG_MASK (SUB)) + +/* Basic block flags. */ +#define GCOV_BLOCK_UNEXPECTED (1 << 1) + +/* Arc flags. */ +#define GCOV_ARC_ON_TREE (1 << 0) +#define GCOV_ARC_FAKE (1 << 1) +#define GCOV_ARC_FALLTHROUGH (1 << 2) + +/* Object & program summary record. */ + +struct gcov_summary +{ + gcov_unsigned_t runs; /* Number of program runs. */ + gcov_type sum_max; /* Sum of individual run max values. */ +}; + +#if !defined(inhibit_libc) + +/* Functions for reading and writing gcov files. In libgcov you can + open the file for reading then writing. Elsewhere you can open the + file either for reading or for writing. When reading a file you may + use the gcov_read_* functions, gcov_sync, gcov_position, & + gcov_error. When writing a file you may use the gcov_write + functions, gcov_seek & gcov_error. When a file is to be rewritten + you use the functions for reading, then gcov_rewrite then the + functions for writing. Your file may become corrupted if you break + these invariants. 
*/ + +#if !IN_LIBGCOV +GCOV_LINKAGE int gcov_open (const char */*name*/, int /*direction*/); +#endif + +#if !IN_LIBGCOV || defined (IN_GCOV_TOOL) +GCOV_LINKAGE int gcov_magic (gcov_unsigned_t, gcov_unsigned_t); +#endif + +/* Available everywhere. */ +GCOV_LINKAGE int gcov_close (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE gcov_unsigned_t gcov_read_unsigned (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE gcov_type gcov_read_counter (void) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_read_summary (struct gcov_summary *) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE const char *gcov_read_string (void); +GCOV_LINKAGE void gcov_sync (gcov_position_t /*base*/, + gcov_unsigned_t /*length */); +char *mangle_path (char const *base); + +#if !IN_GCOV +/* Available outside gcov */ +GCOV_LINKAGE void gcov_write (const void *, unsigned) ATTRIBUTE_HIDDEN; +GCOV_LINKAGE void gcov_write_unsigned (gcov_unsigned_t) ATTRIBUTE_HIDDEN; +#endif + +#if !IN_GCOV && !IN_LIBGCOV +/* Available only in compiler */ +GCOV_LINKAGE void gcov_write_string (const char *); +GCOV_LINKAGE void gcov_write_filename (const char *); +GCOV_LINKAGE gcov_position_t gcov_write_tag (gcov_unsigned_t); +GCOV_LINKAGE void gcov_write_length (gcov_position_t /*position*/); +#endif + +#if IN_GCOV > 0 +/* Available in gcov */ +GCOV_LINKAGE time_t gcov_time (void); +#endif + +#endif /* !inhibit_libc */ + +#endif /* GCC_GCOV_IO_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcse-common.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcse-common.h new file mode 100644 index 0000000..358616e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcse-common.h @@ -0,0 +1,47 @@ +/* Structures and prototypes common across the normal GCSE + implementation and the post-reload implementation. + Copyright (C) 1997-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GCSE_COMMON_H +#define GCC_GCSE_COMMON_H + +typedef vec vec_rtx_heap; +struct modify_pair +{ + rtx dest; /* A MEM. */ + rtx dest_addr; /* The canonical address of `dest'. */ +}; + +typedef vec vec_modify_pair_heap; + +struct gcse_note_stores_info +{ + rtx_insn *insn; + vec *canon_mem_list; +}; + +extern void compute_transp (const_rtx, int, sbitmap *, bitmap, + bitmap, vec *); +extern void record_last_mem_set_info_common (rtx_insn *, + vec *, + vec *, + bitmap, bitmap); + + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcse.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcse.h new file mode 100644 index 0000000..4a201f3 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gcse.h @@ -0,0 +1,45 @@ +/* Global common subexpression elimination/Partial redundancy elimination + and global constant/copy propagation for GNU compiler. + Copyright (C) 1997-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GCSE_H +#define GCC_GCSE_H + +/* Target-dependent globals. */ +struct target_gcse { + /* Nonzero for each mode that supports (set (reg) (reg)). + This is trivially true for integer and floating point values. + It may or may not be true for condition codes. */ + char x_can_copy[(int) NUM_MACHINE_MODES]; + + /* True if the previous field has been initialized. */ + bool x_can_copy_init_p; +}; + +extern struct target_gcse default_target_gcse; +#if SWITCHABLE_TARGET +extern struct target_gcse *this_target_gcse; +#else +#define this_target_gcse (&default_target_gcse) +#endif + +void gcse_cc_finalize (void); +extern bool gcse_or_cprop_is_too_expensive (const char *); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/generic-match.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/generic-match.h new file mode 100644 index 0000000..0fc2526 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/generic-match.h @@ -0,0 +1,33 @@ +/* Generic simplify definitions. + + Copyright (C) 2011-2022 Free Software Foundation, Inc. + Contributed by Richard Guenther + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_GENERIC_MATCH_H +#define GCC_GENERIC_MATCH_H + +/* Note the following functions are supposed to be only used from + fold_unary_loc, fold_binary_loc and fold_ternary_loc respectively. + They are not considered a public API. */ + +tree generic_simplify (location_t, enum tree_code, tree, tree); +tree generic_simplify (location_t, enum tree_code, tree, tree, tree); +tree generic_simplify (location_t, enum tree_code, tree, tree, tree, tree); + +#endif /* GCC_GENERIC_MATCH_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gengtype.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gengtype.h new file mode 100644 index 0000000..76c1fe6 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gengtype.h @@ -0,0 +1,521 @@ +/* Process source files and output type information. + Copyright (C) 2002-2022 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#ifndef GCC_GENGTYPE_H +#define GCC_GENGTYPE_H + +#define obstack_chunk_alloc xmalloc +#define obstack_chunk_free free +#define OBSTACK_CHUNK_SIZE 0 + +/* Sets of accepted source languages like C, C++, Ada... are + represented by a bitmap. */ +typedef unsigned lang_bitmap; + +/* Variable length structure representing an input file. A hash table + ensure uniqueness for a given input file name. The only function + allocating input_file-s is input_file_by_name. 
*/ +struct input_file_st +{ + struct outf* inpoutf; /* Cached corresponding output file, computed + in get_output_file_with_visibility. */ + lang_bitmap inpbitmap; /* The set of languages using this file. */ + bool inpisplugin; /* Flag set for plugin input files. */ + char inpname[1]; /* A variable-length array, ended by a null + char. */ +}; +typedef struct input_file_st input_file; + +/* A file position, mostly for error messages. + The FILE element may be compared using pointer equality. */ +struct fileloc +{ + const input_file *file; + int line; +}; + + +/* Table of all input files and its size. */ +extern const input_file** gt_files; +extern size_t num_gt_files; + +/* Table of headers to be included in gtype-desc.cc that are generated + during the build. These are identified as "./.h". */ +extern const char **build_headers; +extern size_t num_build_headers; + +/* A number of places use the name of this "gengtype.cc" file for a + location for things that we can't rely on the source to define. We + also need to refer to the "system.h" file specifically. These two + pointers are initialized early in main. */ +extern input_file* this_file; +extern input_file* system_h_file; + +/* Retrieve or create the input_file for a given name, which is a file + path. This is the only function allocating input_file-s and it is + hash-consing them. */ +input_file* input_file_by_name (const char* name); + +/* For F an input_file, return the relative path to F from $(srcdir) + if the latter is a prefix in F, NULL otherwise. */ +const char *get_file_srcdir_relative_path (const input_file *inpf); + +/* Get the name of an input file. */ +static inline const char* +get_input_file_name (const input_file *inpf) +{ + if (inpf) + return inpf->inpname; + return NULL; +} + +/* Return a bitmap which has bit `1 << BASE_FILE_' set iff + INPUT_FILE is used by . + + This function should be written to assume that a file _is_ used + if the situation is unclear. 
If it wrongly assumes a file _is_ used, + a linker error will result. If it wrongly assumes a file _is not_ used, + some GC roots may be missed, which is a much harder-to-debug problem. + */ + +static inline lang_bitmap +get_lang_bitmap (const input_file* inpf) +{ + if (inpf == NULL) + return 0; + return inpf->inpbitmap; +} + +/* Set the bitmap returned by get_lang_bitmap. The only legitimate + callers of this function are read_input_list & read_state_*. */ +static inline void +set_lang_bitmap (input_file* inpf, lang_bitmap n) +{ + gcc_assert (inpf); + inpf->inpbitmap = n; +} + +/* Vector of per-language directories. */ +extern const char **lang_dir_names; +extern size_t num_lang_dirs; + +/* Data types handed around within, but opaque to, the lexer and parser. */ +typedef struct pair *pair_p; +typedef struct type *type_p; +typedef const struct type *const_type_p; +typedef struct options *options_p; + +/* Variables used to communicate between the lexer and the parser. */ +extern int lexer_toplevel_done; +extern struct fileloc lexer_line; + +/* Various things, organized as linked lists, needed both in + gengtype.cc & in gengtype-state.cc files. */ +extern pair_p typedefs; +extern type_p structures; +extern pair_p variables; + +/* An enum for distinguishing GGC vs PCH. */ + +enum write_types_kinds +{ + WTK_GGC, + WTK_PCH, + + NUM_WTK +}; + +/* Discrimating kind of types we can understand. */ + +enum typekind { + TYPE_NONE=0, /* Never used, so zeroed memory is invalid. */ + TYPE_UNDEFINED, /* We have not yet seen a definition for this type. + If a type is still undefined when generating code, + an error will be generated. */ + TYPE_SCALAR, /* Scalar types like char. */ + TYPE_STRING, /* The string type. */ + TYPE_STRUCT, /* Type for GTY-ed structs. */ + TYPE_UNION, /* Type for GTY-ed discriminated unions. */ + TYPE_POINTER, /* Pointer type to GTY-ed type. */ + TYPE_ARRAY, /* Array of GTY-ed types. 
*/ + TYPE_CALLBACK, /* A function pointer that needs relocation if + the executable has been loaded at a different + address. */ + TYPE_LANG_STRUCT, /* GCC front-end language specific structs. + Various languages may have homonymous but + different structs. */ + TYPE_USER_STRUCT /* User defined type. Walkers and markers for + this type are assumed to be provided by the + user. */ +}; + +/* Discriminating kind for options. */ +enum option_kind { + OPTION_NONE=0, /* Never used, so zeroed memory is invalid. */ + OPTION_STRING, /* A string-valued option. Most options are + strings. */ + OPTION_TYPE, /* A type-valued option. */ + OPTION_NESTED /* Option data for 'nested_ptr'. */ +}; + + +/* A way to pass data through to the output end. */ +struct options { + struct options *next; /* next option of the same pair. */ + const char *name; /* GTY option name. */ + enum option_kind kind; /* discriminating option kind. */ + union { + const char* string; /* When OPTION_STRING. */ + type_p type; /* When OPTION_TYPE. */ + struct nested_ptr_data* nested; /* when OPTION_NESTED. */ + } info; +}; + + +/* Option data for the 'nested_ptr' option. */ +struct nested_ptr_data { + type_p type; + const char *convert_to; + const char *convert_from; +}; + +/* Some functions to create various options structures with name NAME + and info INFO. NEXT is the next option in the chain. */ + +/* Create a string option. */ +options_p create_string_option (options_p next, const char* name, + const char* info); + +/* Create a type option. */ +options_p create_type_option (options_p next, const char* name, + type_p info); + +/* Create a nested option. */ +options_p create_nested_option (options_p next, const char* name, + struct nested_ptr_data* info); + +/* Create a nested pointer option. */ +options_p create_nested_ptr_option (options_p next, type_p t, + const char *to, const char *from); + +/* A name and a type. */ +struct pair { + pair_p next; /* The next pair in the linked list. 
*/ + const char *name; /* The defined name. */ + type_p type; /* Its GTY-ed type. */ + struct fileloc line; /* The file location. */ + options_p opt; /* GTY options, as a linked list. */ +}; + +/* Usage information for GTY-ed types. Gengtype has to care only of + used GTY-ed types. Types are initially unused, and their usage is + computed by set_gc_used_type and set_gc_used functions. */ + +enum gc_used_enum { + + /* We need that zeroed types are initially unused. */ + GC_UNUSED=0, + + /* The GTY-ed type is used, e.g by a GTY-ed variable or a field + inside a GTY-ed used type. */ + GC_USED, + + /* For GTY-ed structures whose definitions we haven't seen so far + when we encounter a pointer to it that is annotated with + ``maybe_undef''. If after reading in everything we don't have + source file information for it, we assume that it never has been + defined. */ + GC_MAYBE_POINTED_TO, + + /* For known GTY-ed structures which are pointed to by GTY-ed + variables or fields. */ + GC_POINTED_TO +}; + +/* Our type structure describes all types handled by gengtype. */ +struct type { + /* Discriminating kind, cannot be TYPE_NONE. */ + enum typekind kind; + + /* For top-level structs or unions, the 'next' field links the + global list 'structures'; for lang_structs, their homonymous structs are + linked using this 'next' field. The homonymous list starts at the + s.lang_struct field of the lang_struct. See the new_structure function + for details. This is tricky! */ + type_p next; + + /* State number used when writing & reading the persistent state. A + type with a positive number has already been written. For ease + of debugging, newly allocated types have a unique negative + number. */ + int state_number; + + /* Each GTY-ed type which is pointed to by some GTY-ed type knows + the GTY pointer type pointing to it. See create_pointer + function. */ + type_p pointer_to; + + /* Type usage information, computed by set_gc_used_type and + set_gc_used functions. 
*/ + enum gc_used_enum gc_used; + + /* The following union is discriminated by the 'kind' field above. */ + union { + /* TYPE__NONE is impossible. */ + + /* when TYPE_POINTER: */ + type_p p; + + /* when TYPE_STRUCT or TYPE_UNION or TYPE_LANG_STRUCT, we have an + aggregate type containing fields: */ + struct { + const char *tag; /* the aggregate tag, if any. */ + struct fileloc line; /* the source location. */ + pair_p fields; /* the linked list of fields. */ + options_p opt; /* the GTY options if any. */ + lang_bitmap bitmap; /* the set of front-end languages + using that GTY-ed aggregate. */ + /* For TYPE_LANG_STRUCT, the lang_struct field gives the first + element of a linked list of homonymous struct or union types. + Within this list, each homonymous type has as its lang_struct + field the original TYPE_LANG_STRUCT type. This is a dirty + trick, see the new_structure function for details. */ + type_p lang_struct; + + type_p base_class; /* the parent class, if any. */ + + /* The following two fields are not serialized in state files, and + are instead reconstructed on load. */ + + /* The head of a singly-linked list of immediate descendents in + the inheritance hierarchy. */ + type_p first_subclass; + /* The next in that list. */ + type_p next_sibling_class; + + /* Have we already written ggc/pch user func for ptr to this? + (in write_user_func_for_structure_ptr). */ + bool wrote_user_func_for_ptr[NUM_WTK]; + } s; + + /* when TYPE_SCALAR: */ + bool scalar_is_char; + + /* when TYPE_ARRAY: */ + struct { + type_p p; /* The array component type. */ + const char *len; /* The string if any giving its length. */ + } a; + + } u; +}; + +/* The one and only TYPE_STRING. */ +extern struct type string_type; + +/* The two and only TYPE_SCALARs. Their u.scalar_is_char flags are + set early in main. */ +extern struct type scalar_nonchar; +extern struct type scalar_char; + +/* The one and only TYPE_CALLBACK. 
*/ +extern struct type callback_type; + +/* Test if a type is a union, either a plain one or a language + specific one. */ +#define UNION_P(x) \ + ((x)->kind == TYPE_UNION \ + || ((x)->kind == TYPE_LANG_STRUCT \ + && (x)->u.s.lang_struct->kind == TYPE_UNION)) + +/* Test if a type is a union or a structure, perhaps a language + specific one. */ +static inline bool +union_or_struct_p (enum typekind kind) +{ + return (kind == TYPE_UNION + || kind == TYPE_STRUCT + || kind == TYPE_LANG_STRUCT + || kind == TYPE_USER_STRUCT); +} + +static inline bool +union_or_struct_p (const_type_p x) +{ + return union_or_struct_p (x->kind); +} + +/* Give the file location of a type, if any. */ +static inline struct fileloc* +type_fileloc (type_p t) +{ + if (!t) + return NULL; + if (union_or_struct_p (t)) + return &t->u.s.line; + return NULL; +} + +/* Structure representing an output file. */ +struct outf +{ + struct outf *next; + const char *name; + size_t buflength; + size_t bufused; + char *buf; +}; +typedef struct outf *outf_p; + +/* The list of output files. */ +extern outf_p output_files; + +/* The output header file that is included into pretty much every + source file. */ +extern outf_p header_file; + +/* Print, like fprintf, to O. No-op if O is NULL. */ +void +oprintf (outf_p o, const char *S, ...) + ATTRIBUTE_PRINTF_2; + +/* An output file, suitable for definitions, that can see declarations + made in INPF and is linked into every language that uses INPF. May + return NULL in plugin mode. The INPF argument is almost const, but + since the result is cached in its inpoutf field it cannot be + declared const. */ +outf_p get_output_file_with_visibility (input_file* inpf); + +/* The name of an output file, suitable for definitions, that can see + declarations made in INPF and is linked into every language that + uses INPF. May return NULL. */ +const char *get_output_file_name (input_file *inpf); + + +/* Source directory. */ +extern const char *srcdir; /* (-S) program argument. 
*/ + +/* Length of srcdir name. */ +extern size_t srcdir_len; + +/* Variable used for reading and writing the state. */ +extern const char *read_state_filename; /* (-r) program argument. */ +extern const char *write_state_filename; /* (-w) program argument. */ + +/* Functions reading and writing the entire gengtype state, called from + main, and implemented in file gengtype-state.cc. */ +void read_state (const char* path); +/* Write the state, and update the state_number field in types. */ +void write_state (const char* path); + + +/* Print an error message. */ +extern void error_at_line +(const struct fileloc *pos, const char *msg, ...) ATTRIBUTE_PRINTF_2; + +/* Constructor routines for types. */ +extern void do_typedef (const char *s, type_p t, struct fileloc *pos); +extern void do_scalar_typedef (const char *s, struct fileloc *pos); +extern type_p resolve_typedef (const char *s, struct fileloc *pos); +extern void add_subclass (type_p base, type_p subclass); +extern type_p new_structure (const char *name, enum typekind kind, + struct fileloc *pos, pair_p fields, + options_p o, type_p base); +type_p create_user_defined_type (const char *, struct fileloc *); +extern type_p find_structure (const char *s, enum typekind kind); +extern type_p create_scalar_type (const char *name); +extern type_p create_pointer (type_p t); +extern type_p create_array (type_p t, const char *len); +extern pair_p create_field_at (pair_p next, type_p type, + const char *name, options_p opt, + struct fileloc *pos); +extern pair_p nreverse_pairs (pair_p list); +extern type_p adjust_field_type (type_p, options_p); +extern void note_variable (const char *s, type_p t, options_p o, + struct fileloc *pos); + +/* Lexer and parser routines. */ +extern int yylex (const char **yylval); +extern void yybegin (const char *fname); +extern void yyend (void); +extern void parse_file (const char *name); +extern bool hit_error; + +/* Token codes. 
*/ +enum gty_token +{ + EOF_TOKEN = 0, + + /* Per standard convention, codes in the range (0, UCHAR_MAX] + represent single characters with those character codes. */ + CHAR_TOKEN_OFFSET = UCHAR_MAX + 1, + GTY_TOKEN = CHAR_TOKEN_OFFSET, + TYPEDEF, + EXTERN, + STATIC, + UNION, + STRUCT, + ENUM, + ELLIPSIS, + PTR_ALIAS, + NESTED_PTR, + USER_GTY, + NUM, + SCALAR, + ID, + STRING, + CHAR, + ARRAY, + IGNORABLE_CXX_KEYWORD, + + /* print_token assumes that any token >= FIRST_TOKEN_WITH_VALUE may have + a meaningful value to be printed. */ + FIRST_TOKEN_WITH_VALUE = USER_GTY +}; + + +/* Level for verbose messages, e.g. output file generation... */ +extern int verbosity_level; /* (-v) program argument. */ + +/* For debugging purposes we provide two flags. */ + +/* Dump everything to understand gengtype's state. Might be useful to + gengtype users. */ +extern int do_dump; /* (-d) program argument. */ + +/* Trace the execution by many DBGPRINTF (with the position inside + gengtype source code). Only useful to debug gengtype itself. */ +extern int do_debug; /* (-D) program argument. */ + +#define DBGPRINTF(Fmt,...) 
do {if (do_debug) \ + fprintf (stderr, "%s:%d: " Fmt "\n", \ + lbasename (__FILE__),__LINE__, ##__VA_ARGS__);} while (0) +void dbgprint_count_type_at (const char *, int, const char *, type_p); +#define DBGPRINT_COUNT_TYPE(Msg,Ty) do {if (do_debug) \ + dbgprint_count_type_at (__FILE__, __LINE__, Msg, Ty);}while (0) + +#define FOR_ALL_INHERITED_FIELDS(TYPE, FIELD_VAR) \ + for (type_p sub = (TYPE); sub; sub = sub->u.s.base_class) \ + for (FIELD_VAR = sub->u.s.fields; FIELD_VAR; FIELD_VAR = FIELD_VAR->next) + +extern bool +opts_have (options_p opts, const char *str); + + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/genrtl.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/genrtl.h new file mode 100644 index 0000000..a425e24 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/genrtl.h @@ -0,0 +1,1678 @@ +/* Generated automatically by gengenrtl from rtl.def. */ + +#ifndef GCC_GENRTL_H +#define GCC_GENRTL_H + +#include "statistics.h" + +static inline rtx +init_rtx_fmt_0 (rtx rt, machine_mode mode) +{ + PUT_MODE_RAW (rt, mode); + X0EXP (rt, 0) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_0_stat (RTX_CODE code, machine_mode mode MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_0 (rt, mode); +} + +#define gen_rtx_fmt_0(c, m) \ + gen_rtx_fmt_0_stat ((c), (m) MEM_STAT_INFO) + +#define alloca_rtx_fmt_0(c, m) \ + init_rtx_fmt_0 (rtx_alloca ((c)), (m)) + +static inline rtx +init_rtx_fmt_ee (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_ee_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ee (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_ee(c, m, arg0, arg1) \ + gen_rtx_fmt_ee_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define 
alloca_rtx_fmt_ee(c, m, arg0, arg1) \ + init_rtx_fmt_ee (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_ue (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_ue_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ue (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_ue(c, m, arg0, arg1) \ + gen_rtx_fmt_ue_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ue(c, m, arg0, arg1) \ + init_rtx_fmt_ue (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_ie (rtx rt, machine_mode mode, + int arg0, + rtx arg1) +{ + PUT_MODE_RAW (rt, mode); + XINT (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_ie_stat (RTX_CODE code, machine_mode mode, + int arg0, + rtx arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ie (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_ie(c, m, arg0, arg1) \ + gen_rtx_fmt_ie_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ie(c, m, arg0, arg1) \ + init_rtx_fmt_ie (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_E (rtx rt, machine_mode mode, + rtvec arg0) +{ + PUT_MODE_RAW (rt, mode); + XVEC (rt, 0) = arg0; + return rt; +} + +static inline rtx +gen_rtx_fmt_E_stat (RTX_CODE code, machine_mode mode, + rtvec arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_E (rt, mode, arg0); +} + +#define gen_rtx_fmt_E(c, m, arg0) \ + gen_rtx_fmt_E_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_E(c, m, arg0) \ + init_rtx_fmt_E (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_i (rtx rt, machine_mode mode, + int arg0) +{ + PUT_MODE_RAW (rt, mode); + XINT (rt, 0) = arg0; 
+ return rt; +} + +static inline rtx +gen_rtx_fmt_i_stat (RTX_CODE code, machine_mode mode, + int arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_i (rt, mode, arg0); +} + +#define gen_rtx_fmt_i(c, m, arg0) \ + gen_rtx_fmt_i_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_i(c, m, arg0) \ + init_rtx_fmt_i (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_uuBeiie (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3, + int arg4, + int arg5, + rtx arg6) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XBBDEF (rt, 2) = arg2; + XEXP (rt, 3) = arg3; + XINT (rt, 4) = arg4; + XINT (rt, 5) = arg5; + XEXP (rt, 6) = arg6; + return rt; +} + +static inline rtx +gen_rtx_fmt_uuBeiie_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3, + int arg4, + int arg5, + rtx arg6 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_uuBeiie (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6); +} + +#define gen_rtx_fmt_uuBeiie(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \ + gen_rtx_fmt_uuBeiie_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6) MEM_STAT_INFO) + +#define alloca_rtx_fmt_uuBeiie(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \ + init_rtx_fmt_uuBeiie (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6)) + +static inline rtx +init_rtx_fmt_uuBeiie0 (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3, + int arg4, + int arg5, + rtx arg6) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XBBDEF (rt, 2) = arg2; + XEXP (rt, 3) = arg3; + XINT (rt, 4) = arg4; + XINT (rt, 5) = arg5; + XEXP (rt, 6) = arg6; + X0EXP (rt, 7) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_uuBeiie0_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block 
arg2, + rtx arg3, + int arg4, + int arg5, + rtx arg6 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_uuBeiie0 (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6); +} + +#define gen_rtx_fmt_uuBeiie0(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \ + gen_rtx_fmt_uuBeiie0_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6) MEM_STAT_INFO) + +#define alloca_rtx_fmt_uuBeiie0(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \ + init_rtx_fmt_uuBeiie0 (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6)) + +static inline rtx +init_rtx_fmt_uuBeiiee (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3, + int arg4, + int arg5, + rtx arg6, + rtx arg7) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XBBDEF (rt, 2) = arg2; + XEXP (rt, 3) = arg3; + XINT (rt, 4) = arg4; + XINT (rt, 5) = arg5; + XEXP (rt, 6) = arg6; + XEXP (rt, 7) = arg7; + return rt; +} + +static inline rtx +gen_rtx_fmt_uuBeiiee_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3, + int arg4, + int arg5, + rtx arg6, + rtx arg7 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_uuBeiiee (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); +} + +#define gen_rtx_fmt_uuBeiiee(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ + gen_rtx_fmt_uuBeiiee_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6), (arg7) MEM_STAT_INFO) + +#define alloca_rtx_fmt_uuBeiiee(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \ + init_rtx_fmt_uuBeiiee (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6), (arg7)) + +static inline rtx +init_rtx_fmt_uuBe0000 (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XBBDEF (rt, 2) = arg2; + XEXP (rt, 3) 
= arg3; + X0EXP (rt, 4) = NULL_RTX; + X0EXP (rt, 5) = NULL_RTX; + X0EXP (rt, 6) = NULL_RTX; + X0EXP (rt, 7) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_uuBe0000_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + rtx arg3 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_uuBe0000 (rt, mode, arg0, arg1, arg2, arg3); +} + +#define gen_rtx_fmt_uuBe0000(c, m, arg0, arg1, arg2, arg3) \ + gen_rtx_fmt_uuBe0000_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO) + +#define alloca_rtx_fmt_uuBe0000(c, m, arg0, arg1, arg2, arg3) \ + init_rtx_fmt_uuBe0000 (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3)) + +static inline rtx +init_rtx_fmt_uu00000 (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + X0EXP (rt, 2) = NULL_RTX; + X0EXP (rt, 3) = NULL_RTX; + X0EXP (rt, 4) = NULL_RTX; + X0EXP (rt, 5) = NULL_RTX; + X0EXP (rt, 6) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_uu00000_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_uu00000 (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_uu00000(c, m, arg0, arg1) \ + gen_rtx_fmt_uu00000_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_uu00000(c, m, arg0, arg1) \ + init_rtx_fmt_uu00000 (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_uuB00is (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + int arg3, + const char *arg4) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XBBDEF (rt, 2) = arg2; + X0EXP (rt, 3) = NULL_RTX; + X0EXP (rt, 4) = NULL_RTX; + XINT (rt, 5) = arg3; + XSTR (rt, 6) = arg4; + return rt; +} + +static inline rtx +gen_rtx_fmt_uuB00is_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtx arg1, + basic_block arg2, + 
int arg3, + const char *arg4 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_uuB00is (rt, mode, arg0, arg1, arg2, arg3, arg4); +} + +#define gen_rtx_fmt_uuB00is(c, m, arg0, arg1, arg2, arg3, arg4) \ + gen_rtx_fmt_uuB00is_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4) MEM_STAT_INFO) + +#define alloca_rtx_fmt_uuB00is(c, m, arg0, arg1, arg2, arg3, arg4) \ + init_rtx_fmt_uuB00is (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4)) + +static inline rtx +init_rtx_fmt_si (rtx rt, machine_mode mode, + const char *arg0, + int arg1) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XINT (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_si_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + int arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_si (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_si(c, m, arg0, arg1) \ + gen_rtx_fmt_si_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_si(c, m, arg0, arg1) \ + init_rtx_fmt_si (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_ssiEEEi (rtx rt, machine_mode mode, + const char *arg0, + const char *arg1, + int arg2, + rtvec arg3, + rtvec arg4, + rtvec arg5, + int arg6) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + XINT (rt, 2) = arg2; + XVEC (rt, 3) = arg3; + XVEC (rt, 4) = arg4; + XVEC (rt, 5) = arg5; + XINT (rt, 6) = arg6; + return rt; +} + +static inline rtx +gen_rtx_fmt_ssiEEEi_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + const char *arg1, + int arg2, + rtvec arg3, + rtvec arg4, + rtvec arg5, + int arg6 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ssiEEEi (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6); +} + +#define gen_rtx_fmt_ssiEEEi(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \ + gen_rtx_fmt_ssiEEEi_stat ((c), (m), (arg0), (arg1), (arg2), 
(arg3), (arg4), (arg5), (arg6) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ssiEEEi(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \ + init_rtx_fmt_ssiEEEi (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6)) + +static inline rtx +init_rtx_fmt_Ei (rtx rt, machine_mode mode, + rtvec arg0, + int arg1) +{ + PUT_MODE_RAW (rt, mode); + XVEC (rt, 0) = arg0; + XINT (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_Ei_stat (RTX_CODE code, machine_mode mode, + rtvec arg0, + int arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_Ei (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_Ei(c, m, arg0, arg1) \ + gen_rtx_fmt_Ei_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_Ei(c, m, arg0, arg1) \ + init_rtx_fmt_Ei (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_eEee0 (rtx rt, machine_mode mode, + rtx arg0, + rtvec arg1, + rtx arg2, + rtx arg3) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XVEC (rt, 1) = arg1; + XEXP (rt, 2) = arg2; + XEXP (rt, 3) = arg3; + X0EXP (rt, 4) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_eEee0_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtvec arg1, + rtx arg2, + rtx arg3 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_eEee0 (rt, mode, arg0, arg1, arg2, arg3); +} + +#define gen_rtx_fmt_eEee0(c, m, arg0, arg1, arg2, arg3) \ + gen_rtx_fmt_eEee0_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO) + +#define alloca_rtx_fmt_eEee0(c, m, arg0, arg1, arg2, arg3) \ + init_rtx_fmt_eEee0 (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3)) + +static inline rtx +init_rtx_fmt_eee (rtx rt, machine_mode mode, + rtx arg0, + rtx arg1, + rtx arg2) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XEXP (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_eee_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + 
rtx arg1, + rtx arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_eee (rt, mode, arg0, arg1, arg2); +} + +#define gen_rtx_fmt_eee(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_eee_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_eee(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_eee (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_e (rtx rt, machine_mode mode, + rtx arg0) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + return rt; +} + +static inline rtx +gen_rtx_fmt_e_stat (RTX_CODE code, machine_mode mode, + rtx arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_e (rt, mode, arg0); +} + +#define gen_rtx_fmt_e(c, m, arg0) \ + gen_rtx_fmt_e_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_e(c, m, arg0) \ + init_rtx_fmt_e (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_ (rtx rt, machine_mode mode) +{ + PUT_MODE_RAW (rt, mode); + return rt; +} + +static inline rtx +gen_rtx_fmt__stat (RTX_CODE code, machine_mode mode MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ (rt, mode); +} + +#define gen_rtx_fmt_(c, m) \ + gen_rtx_fmt__stat ((c), (m) MEM_STAT_INFO) + +#define alloca_rtx_fmt_(c, m) \ + init_rtx_fmt_ (rtx_alloca ((c)), (m)) + +static inline rtx +init_rtx_fmt_w (rtx rt, machine_mode mode, + HOST_WIDE_INT arg0) +{ + PUT_MODE_RAW (rt, mode); + XWINT (rt, 0) = arg0; + return rt; +} + +static inline rtx +gen_rtx_fmt_w_stat (RTX_CODE code, machine_mode mode, + HOST_WIDE_INT arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_w (rt, mode, arg0); +} + +#define gen_rtx_fmt_w(c, m, arg0) \ + gen_rtx_fmt_w_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_w(c, m, arg0) \ + init_rtx_fmt_w (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_www (rtx rt, machine_mode mode, + 
HOST_WIDE_INT arg0, + HOST_WIDE_INT arg1, + HOST_WIDE_INT arg2) +{ + PUT_MODE_RAW (rt, mode); + XWINT (rt, 0) = arg0; + XWINT (rt, 1) = arg1; + XWINT (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_www_stat (RTX_CODE code, machine_mode mode, + HOST_WIDE_INT arg0, + HOST_WIDE_INT arg1, + HOST_WIDE_INT arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_www (rt, mode, arg0, arg1, arg2); +} + +#define gen_rtx_fmt_www(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_www_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_www(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_www (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_s (rtx rt, machine_mode mode, + const char *arg0) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + return rt; +} + +static inline rtx +gen_rtx_fmt_s_stat (RTX_CODE code, machine_mode mode, + const char *arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_s (rt, mode, arg0); +} + +#define gen_rtx_fmt_s(c, m, arg0) \ + gen_rtx_fmt_s_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_s(c, m, arg0) \ + init_rtx_fmt_s (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_ep (rtx rt, machine_mode mode, + rtx arg0, + poly_uint16 arg1) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + SUBREG_BYTE (rt) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_ep_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + poly_uint16 arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ep (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_ep(c, m, arg0, arg1) \ + gen_rtx_fmt_ep_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ep(c, m, arg0, arg1) \ + init_rtx_fmt_ep (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_e0 (rtx rt, machine_mode mode, + rtx arg0) +{ + PUT_MODE_RAW (rt, mode); + 
XEXP (rt, 0) = arg0; + X0EXP (rt, 1) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_e0_stat (RTX_CODE code, machine_mode mode, + rtx arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_e0 (rt, mode, arg0); +} + +#define gen_rtx_fmt_e0(c, m, arg0) \ + gen_rtx_fmt_e0_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_e0(c, m, arg0) \ + init_rtx_fmt_e0 (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_u (rtx rt, machine_mode mode, + rtx arg0) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + return rt; +} + +static inline rtx +gen_rtx_fmt_u_stat (RTX_CODE code, machine_mode mode, + rtx arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_u (rt, mode, arg0); +} + +#define gen_rtx_fmt_u(c, m, arg0) \ + gen_rtx_fmt_u_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_u(c, m, arg0) \ + init_rtx_fmt_u (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_s0 (rtx rt, machine_mode mode, + const char *arg0) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + X0EXP (rt, 1) = NULL_RTX; + return rt; +} + +static inline rtx +gen_rtx_fmt_s0_stat (RTX_CODE code, machine_mode mode, + const char *arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_s0 (rt, mode, arg0); +} + +#define gen_rtx_fmt_s0(c, m, arg0) \ + gen_rtx_fmt_s0_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_s0(c, m, arg0) \ + init_rtx_fmt_s0 (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_te (rtx rt, machine_mode mode, + tree arg0, + rtx arg1) +{ + PUT_MODE_RAW (rt, mode); + XTREE (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_te_stat (RTX_CODE code, machine_mode mode, + tree arg0, + rtx arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_te (rt, mode, arg0, arg1); +} + +#define 
gen_rtx_fmt_te(c, m, arg0, arg1) \ + gen_rtx_fmt_te_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_te(c, m, arg0, arg1) \ + init_rtx_fmt_te (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_t (rtx rt, machine_mode mode, + tree arg0) +{ + PUT_MODE_RAW (rt, mode); + XTREE (rt, 0) = arg0; + return rt; +} + +static inline rtx +gen_rtx_fmt_t_stat (RTX_CODE code, machine_mode mode, + tree arg0 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_t (rt, mode, arg0); +} + +#define gen_rtx_fmt_t(c, m, arg0) \ + gen_rtx_fmt_t_stat ((c), (m), (arg0) MEM_STAT_INFO) + +#define alloca_rtx_fmt_t(c, m, arg0) \ + init_rtx_fmt_t (rtx_alloca ((c)), (m), (arg0)) + +static inline rtx +init_rtx_fmt_iss (rtx rt, machine_mode mode, + int arg0, + const char *arg1, + const char *arg2) +{ + PUT_MODE_RAW (rt, mode); + XINT (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + XSTR (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_iss_stat (RTX_CODE code, machine_mode mode, + int arg0, + const char *arg1, + const char *arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_iss (rt, mode, arg0, arg1, arg2); +} + +#define gen_rtx_fmt_iss(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_iss_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_iss(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_iss (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_is (rtx rt, machine_mode mode, + int arg0, + const char *arg1) +{ + PUT_MODE_RAW (rt, mode); + XINT (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_is_stat (RTX_CODE code, machine_mode mode, + int arg0, + const char *arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_is (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_is(c, m, arg0, arg1) \ + gen_rtx_fmt_is_stat ((c), (m), (arg0), (arg1) 
MEM_STAT_INFO) + +#define alloca_rtx_fmt_is(c, m, arg0, arg1) \ + init_rtx_fmt_is (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_isE (rtx rt, machine_mode mode, + int arg0, + const char *arg1, + rtvec arg2) +{ + PUT_MODE_RAW (rt, mode); + XINT (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + XVEC (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_isE_stat (RTX_CODE code, machine_mode mode, + int arg0, + const char *arg1, + rtvec arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_isE (rt, mode, arg0, arg1, arg2); +} + +#define gen_rtx_fmt_isE(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_isE_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_isE(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_isE (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_iE (rtx rt, machine_mode mode, + int arg0, + rtvec arg1) +{ + PUT_MODE_RAW (rt, mode); + XINT (rt, 0) = arg0; + XVEC (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_iE_stat (RTX_CODE code, machine_mode mode, + int arg0, + rtvec arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_iE (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_iE(c, m, arg0, arg1) \ + gen_rtx_fmt_iE_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_iE(c, m, arg0, arg1) \ + init_rtx_fmt_iE (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_ss (rtx rt, machine_mode mode, + const char *arg0, + const char *arg1) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_ss_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + const char *arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ss (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_ss(c, m, arg0, arg1) \ + gen_rtx_fmt_ss_stat ((c), (m), (arg0), 
(arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ss(c, m, arg0, arg1) \ + init_rtx_fmt_ss (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_eE (rtx rt, machine_mode mode, + rtx arg0, + rtvec arg1) +{ + PUT_MODE_RAW (rt, mode); + XEXP (rt, 0) = arg0; + XVEC (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_eE_stat (RTX_CODE code, machine_mode mode, + rtx arg0, + rtvec arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_eE (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_eE(c, m, arg0, arg1) \ + gen_rtx_fmt_eE_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_eE(c, m, arg0, arg1) \ + init_rtx_fmt_eE (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_ses (rtx rt, machine_mode mode, + const char *arg0, + rtx arg1, + const char *arg2) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + XSTR (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_ses_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + rtx arg1, + const char *arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ses (rt, mode, arg0, arg1, arg2); +} + +#define gen_rtx_fmt_ses(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_ses_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ses(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_ses (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_sss (rtx rt, machine_mode mode, + const char *arg0, + const char *arg1, + const char *arg2) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + XSTR (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_sss_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + const char *arg1, + const char *arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_sss (rt, mode, arg0, arg1, arg2); +} + 
+#define gen_rtx_fmt_sss(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_sss_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_sss(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_sss (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_sse (rtx rt, machine_mode mode, + const char *arg0, + const char *arg1, + rtx arg2) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + XEXP (rt, 2) = arg2; + return rt; +} + +static inline rtx +gen_rtx_fmt_sse_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + const char *arg1, + rtx arg2 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_sse (rt, mode, arg0, arg1, arg2); +} + +#define gen_rtx_fmt_sse(c, m, arg0, arg1, arg2) \ + gen_rtx_fmt_sse_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO) + +#define alloca_rtx_fmt_sse(c, m, arg0, arg1, arg2) \ + init_rtx_fmt_sse (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2)) + +static inline rtx +init_rtx_fmt_sies (rtx rt, machine_mode mode, + const char *arg0, + int arg1, + rtx arg2, + const char *arg3) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XINT (rt, 1) = arg1; + XEXP (rt, 2) = arg2; + XSTR (rt, 3) = arg3; + return rt; +} + +static inline rtx +gen_rtx_fmt_sies_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + int arg1, + rtx arg2, + const char *arg3 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_sies (rt, mode, arg0, arg1, arg2, arg3); +} + +#define gen_rtx_fmt_sies(c, m, arg0, arg1, arg2, arg3) \ + gen_rtx_fmt_sies_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO) + +#define alloca_rtx_fmt_sies(c, m, arg0, arg1, arg2, arg3) \ + init_rtx_fmt_sies (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3)) + +static inline rtx +init_rtx_fmt_sE (rtx rt, machine_mode mode, + const char *arg0, + rtvec arg1) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XVEC (rt, 1) = arg1; + return 
rt; +} + +static inline rtx +gen_rtx_fmt_sE_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + rtvec arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_sE (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_sE(c, m, arg0, arg1) \ + gen_rtx_fmt_sE_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_sE(c, m, arg0, arg1) \ + init_rtx_fmt_sE (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_ww (rtx rt, machine_mode mode, + HOST_WIDE_INT arg0, + HOST_WIDE_INT arg1) +{ + PUT_MODE_RAW (rt, mode); + XWINT (rt, 0) = arg0; + XWINT (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_ww_stat (RTX_CODE code, machine_mode mode, + HOST_WIDE_INT arg0, + HOST_WIDE_INT arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ww (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_ww(c, m, arg0, arg1) \ + gen_rtx_fmt_ww_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ww(c, m, arg0, arg1) \ + init_rtx_fmt_ww (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_Ee (rtx rt, machine_mode mode, + rtvec arg0, + rtx arg1) +{ + PUT_MODE_RAW (rt, mode); + XVEC (rt, 0) = arg0; + XEXP (rt, 1) = arg1; + return rt; +} + +static inline rtx +gen_rtx_fmt_Ee_stat (RTX_CODE code, machine_mode mode, + rtvec arg0, + rtx arg1 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_Ee (rt, mode, arg0, arg1); +} + +#define gen_rtx_fmt_Ee(c, m, arg0, arg1) \ + gen_rtx_fmt_Ee_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO) + +#define alloca_rtx_fmt_Ee(c, m, arg0, arg1) \ + init_rtx_fmt_Ee (rtx_alloca ((c)), (m), (arg0), (arg1)) + +static inline rtx +init_rtx_fmt_sEsE (rtx rt, machine_mode mode, + const char *arg0, + rtvec arg1, + const char *arg2, + rtvec arg3) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XVEC (rt, 1) = arg1; + XSTR (rt, 2) = arg2; + XVEC (rt, 3) = arg3; + 
return rt; +} + +static inline rtx +gen_rtx_fmt_sEsE_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + rtvec arg1, + const char *arg2, + rtvec arg3 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_sEsE (rt, mode, arg0, arg1, arg2, arg3); +} + +#define gen_rtx_fmt_sEsE(c, m, arg0, arg1, arg2, arg3) \ + gen_rtx_fmt_sEsE_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO) + +#define alloca_rtx_fmt_sEsE(c, m, arg0, arg1, arg2, arg3) \ + init_rtx_fmt_sEsE (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3)) + +static inline rtx +init_rtx_fmt_ssss (rtx rt, machine_mode mode, + const char *arg0, + const char *arg1, + const char *arg2, + const char *arg3) +{ + PUT_MODE_RAW (rt, mode); + XSTR (rt, 0) = arg0; + XSTR (rt, 1) = arg1; + XSTR (rt, 2) = arg2; + XSTR (rt, 3) = arg3; + return rt; +} + +static inline rtx +gen_rtx_fmt_ssss_stat (RTX_CODE code, machine_mode mode, + const char *arg0, + const char *arg1, + const char *arg2, + const char *arg3 MEM_STAT_DECL) +{ + rtx rt; + + rt = rtx_alloc (code PASS_MEM_STAT); + return init_rtx_fmt_ssss (rt, mode, arg0, arg1, arg2, arg3); +} + +#define gen_rtx_fmt_ssss(c, m, arg0, arg1, arg2, arg3) \ + gen_rtx_fmt_ssss_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO) + +#define alloca_rtx_fmt_ssss(c, m, arg0, arg1, arg2, arg3) \ + init_rtx_fmt_ssss (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3)) + + +#define gen_rtx_VALUE(MODE) \ + gen_rtx_fmt_0 (VALUE, (MODE)) +#define gen_rtx_DEBUG_EXPR(MODE) \ + gen_rtx_fmt_0 (DEBUG_EXPR, (MODE)) +#define gen_rtx_raw_EXPR_LIST(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (EXPR_LIST, (MODE), (ARG0), (ARG1)) +#define gen_rtx_raw_INSN_LIST(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ue (INSN_LIST, (MODE), (ARG0), (ARG1)) +#define gen_rtx_INT_LIST(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ie (INT_LIST, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SEQUENCE(MODE, ARG0) \ + gen_rtx_fmt_E (SEQUENCE, (MODE), (ARG0)) +#define gen_rtx_ADDRESS(MODE, ARG0) 
\ + gen_rtx_fmt_i (ADDRESS, (MODE), (ARG0)) +#define gen_rtx_DEBUG_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \ + gen_rtx_fmt_uuBeiie (DEBUG_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6)) +#define gen_rtx_raw_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \ + gen_rtx_fmt_uuBeiie (INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6)) +#define gen_rtx_JUMP_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \ + gen_rtx_fmt_uuBeiie0 (JUMP_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6)) +#define gen_rtx_CALL_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7) \ + gen_rtx_fmt_uuBeiiee (CALL_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7)) +#define gen_rtx_JUMP_TABLE_DATA(MODE, ARG0, ARG1, ARG2, ARG3) \ + gen_rtx_fmt_uuBe0000 (JUMP_TABLE_DATA, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) +#define gen_rtx_BARRIER(MODE, ARG0, ARG1) \ + gen_rtx_fmt_uu00000 (BARRIER, (MODE), (ARG0), (ARG1)) +#define gen_rtx_CODE_LABEL(MODE, ARG0, ARG1, ARG2, ARG3, ARG4) \ + gen_rtx_fmt_uuB00is (CODE_LABEL, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4)) +#define gen_rtx_COND_EXEC(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (COND_EXEC, (MODE), (ARG0), (ARG1)) +#define gen_rtx_PARALLEL(MODE, ARG0) \ + gen_rtx_fmt_E (PARALLEL, (MODE), (ARG0)) +#define gen_rtx_ASM_INPUT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_si (ASM_INPUT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ASM_OPERANDS(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \ + gen_rtx_fmt_ssiEEEi (ASM_OPERANDS, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6)) +#define gen_rtx_UNSPEC(MODE, ARG0, ARG1) \ + gen_rtx_fmt_Ei (UNSPEC, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNSPEC_VOLATILE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_Ei (UNSPEC_VOLATILE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ADDR_VEC(MODE, ARG0) \ + gen_rtx_fmt_E (ADDR_VEC, (MODE), (ARG0)) +#define gen_rtx_ADDR_DIFF_VEC(MODE, ARG0, ARG1, ARG2, ARG3) \ + gen_rtx_fmt_eEee0 
(ADDR_DIFF_VEC, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) +#define gen_rtx_PREFETCH(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_eee (PREFETCH, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_SET(ARG0, ARG1) \ + gen_rtx_fmt_ee (SET, VOIDmode, (ARG0), (ARG1)) +#define gen_rtx_USE(MODE, ARG0) \ + gen_rtx_fmt_e (USE, (MODE), (ARG0)) +#define gen_rtx_CLOBBER(MODE, ARG0) \ + gen_rtx_fmt_e (CLOBBER, (MODE), (ARG0)) +#define gen_rtx_CALL(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (CALL, (MODE), (ARG0), (ARG1)) +#define gen_rtx_raw_RETURN(MODE) \ + gen_rtx_fmt_ (RETURN, (MODE)) +#define gen_rtx_raw_SIMPLE_RETURN(MODE) \ + gen_rtx_fmt_ (SIMPLE_RETURN, (MODE)) +#define gen_rtx_EH_RETURN(MODE) \ + gen_rtx_fmt_ (EH_RETURN, (MODE)) +#define gen_rtx_TRAP_IF(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (TRAP_IF, (MODE), (ARG0), (ARG1)) +#define gen_rtx_raw_CONST_INT(MODE, ARG0) \ + gen_rtx_fmt_w (CONST_INT, (MODE), (ARG0)) +#define gen_rtx_raw_CONST_VECTOR(MODE, ARG0) \ + gen_rtx_fmt_E (CONST_VECTOR, (MODE), (ARG0)) +#define gen_rtx_CONST_STRING(MODE, ARG0) \ + gen_rtx_fmt_s (CONST_STRING, (MODE), (ARG0)) +#define gen_rtx_CONST(MODE, ARG0) \ + gen_rtx_fmt_e (CONST, (MODE), (ARG0)) +#define gen_rtx_raw_PC(MODE) \ + gen_rtx_fmt_ (PC, (MODE)) +#define gen_rtx_SCRATCH(MODE) \ + gen_rtx_fmt_ (SCRATCH, (MODE)) +#define gen_rtx_raw_SUBREG(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ep (SUBREG, (MODE), (ARG0), (ARG1)) +#define gen_rtx_STRICT_LOW_PART(MODE, ARG0) \ + gen_rtx_fmt_e (STRICT_LOW_PART, (MODE), (ARG0)) +#define gen_rtx_CONCAT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (CONCAT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_CONCATN(MODE, ARG0) \ + gen_rtx_fmt_E (CONCATN, (MODE), (ARG0)) +#define gen_rtx_raw_MEM(MODE, ARG0) \ + gen_rtx_fmt_e0 (MEM, (MODE), (ARG0)) +#define gen_rtx_LABEL_REF(MODE, ARG0) \ + gen_rtx_fmt_u (LABEL_REF, (MODE), (ARG0)) +#define gen_rtx_SYMBOL_REF(MODE, ARG0) \ + gen_rtx_fmt_s0 (SYMBOL_REF, (MODE), (ARG0)) +#define gen_rtx_IF_THEN_ELSE(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_eee 
(IF_THEN_ELSE, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_COMPARE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (COMPARE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_PLUS(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (PLUS, (MODE), (ARG0), (ARG1)) +#define gen_rtx_MINUS(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (MINUS, (MODE), (ARG0), (ARG1)) +#define gen_rtx_NEG(MODE, ARG0) \ + gen_rtx_fmt_e (NEG, (MODE), (ARG0)) +#define gen_rtx_MULT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (MULT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SS_MULT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SS_MULT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_US_MULT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (US_MULT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SMUL_HIGHPART(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SMUL_HIGHPART, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UMUL_HIGHPART(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UMUL_HIGHPART, (MODE), (ARG0), (ARG1)) +#define gen_rtx_DIV(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (DIV, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SS_DIV(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SS_DIV, (MODE), (ARG0), (ARG1)) +#define gen_rtx_US_DIV(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (US_DIV, (MODE), (ARG0), (ARG1)) +#define gen_rtx_MOD(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (MOD, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UDIV(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UDIV, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UMOD(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UMOD, (MODE), (ARG0), (ARG1)) +#define gen_rtx_AND(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (AND, (MODE), (ARG0), (ARG1)) +#define gen_rtx_IOR(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (IOR, (MODE), (ARG0), (ARG1)) +#define gen_rtx_XOR(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (XOR, (MODE), (ARG0), (ARG1)) +#define gen_rtx_NOT(MODE, ARG0) \ + gen_rtx_fmt_e (NOT, (MODE), (ARG0)) +#define gen_rtx_ASHIFT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (ASHIFT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ROTATE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (ROTATE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ASHIFTRT(MODE, 
ARG0, ARG1) \ + gen_rtx_fmt_ee (ASHIFTRT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_LSHIFTRT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (LSHIFTRT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ROTATERT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (ROTATERT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SMIN(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SMIN, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SMAX(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SMAX, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UMIN(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UMIN, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UMAX(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UMAX, (MODE), (ARG0), (ARG1)) +#define gen_rtx_PRE_DEC(MODE, ARG0) \ + gen_rtx_fmt_e (PRE_DEC, (MODE), (ARG0)) +#define gen_rtx_PRE_INC(MODE, ARG0) \ + gen_rtx_fmt_e (PRE_INC, (MODE), (ARG0)) +#define gen_rtx_POST_DEC(MODE, ARG0) \ + gen_rtx_fmt_e (POST_DEC, (MODE), (ARG0)) +#define gen_rtx_POST_INC(MODE, ARG0) \ + gen_rtx_fmt_e (POST_INC, (MODE), (ARG0)) +#define gen_rtx_PRE_MODIFY(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (PRE_MODIFY, (MODE), (ARG0), (ARG1)) +#define gen_rtx_POST_MODIFY(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (POST_MODIFY, (MODE), (ARG0), (ARG1)) +#define gen_rtx_NE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (NE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_EQ(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (EQ, (MODE), (ARG0), (ARG1)) +#define gen_rtx_GE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (GE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_GT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (GT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_LE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (LE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_LT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (LT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_LTGT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (LTGT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_GEU(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (GEU, (MODE), (ARG0), (ARG1)) +#define gen_rtx_GTU(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (GTU, (MODE), (ARG0), (ARG1)) +#define gen_rtx_LEU(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee 
(LEU, (MODE), (ARG0), (ARG1)) +#define gen_rtx_LTU(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (LTU, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNORDERED(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UNORDERED, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ORDERED(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (ORDERED, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNEQ(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UNEQ, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNGE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UNGE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNGT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UNGT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNLE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UNLE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_UNLT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (UNLT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SIGN_EXTEND(MODE, ARG0) \ + gen_rtx_fmt_e (SIGN_EXTEND, (MODE), (ARG0)) +#define gen_rtx_ZERO_EXTEND(MODE, ARG0) \ + gen_rtx_fmt_e (ZERO_EXTEND, (MODE), (ARG0)) +#define gen_rtx_TRUNCATE(MODE, ARG0) \ + gen_rtx_fmt_e (TRUNCATE, (MODE), (ARG0)) +#define gen_rtx_FLOAT_EXTEND(MODE, ARG0) \ + gen_rtx_fmt_e (FLOAT_EXTEND, (MODE), (ARG0)) +#define gen_rtx_FLOAT_TRUNCATE(MODE, ARG0) \ + gen_rtx_fmt_e (FLOAT_TRUNCATE, (MODE), (ARG0)) +#define gen_rtx_FLOAT(MODE, ARG0) \ + gen_rtx_fmt_e (FLOAT, (MODE), (ARG0)) +#define gen_rtx_FIX(MODE, ARG0) \ + gen_rtx_fmt_e (FIX, (MODE), (ARG0)) +#define gen_rtx_UNSIGNED_FLOAT(MODE, ARG0) \ + gen_rtx_fmt_e (UNSIGNED_FLOAT, (MODE), (ARG0)) +#define gen_rtx_UNSIGNED_FIX(MODE, ARG0) \ + gen_rtx_fmt_e (UNSIGNED_FIX, (MODE), (ARG0)) +#define gen_rtx_FRACT_CONVERT(MODE, ARG0) \ + gen_rtx_fmt_e (FRACT_CONVERT, (MODE), (ARG0)) +#define gen_rtx_UNSIGNED_FRACT_CONVERT(MODE, ARG0) \ + gen_rtx_fmt_e (UNSIGNED_FRACT_CONVERT, (MODE), (ARG0)) +#define gen_rtx_SAT_FRACT(MODE, ARG0) \ + gen_rtx_fmt_e (SAT_FRACT, (MODE), (ARG0)) +#define gen_rtx_UNSIGNED_SAT_FRACT(MODE, ARG0) \ + gen_rtx_fmt_e (UNSIGNED_SAT_FRACT, (MODE), (ARG0)) +#define gen_rtx_ABS(MODE, ARG0) \ + gen_rtx_fmt_e (ABS, 
(MODE), (ARG0)) +#define gen_rtx_SQRT(MODE, ARG0) \ + gen_rtx_fmt_e (SQRT, (MODE), (ARG0)) +#define gen_rtx_BSWAP(MODE, ARG0) \ + gen_rtx_fmt_e (BSWAP, (MODE), (ARG0)) +#define gen_rtx_FFS(MODE, ARG0) \ + gen_rtx_fmt_e (FFS, (MODE), (ARG0)) +#define gen_rtx_CLRSB(MODE, ARG0) \ + gen_rtx_fmt_e (CLRSB, (MODE), (ARG0)) +#define gen_rtx_CLZ(MODE, ARG0) \ + gen_rtx_fmt_e (CLZ, (MODE), (ARG0)) +#define gen_rtx_CTZ(MODE, ARG0) \ + gen_rtx_fmt_e (CTZ, (MODE), (ARG0)) +#define gen_rtx_POPCOUNT(MODE, ARG0) \ + gen_rtx_fmt_e (POPCOUNT, (MODE), (ARG0)) +#define gen_rtx_PARITY(MODE, ARG0) \ + gen_rtx_fmt_e (PARITY, (MODE), (ARG0)) +#define gen_rtx_SIGN_EXTRACT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_eee (SIGN_EXTRACT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_ZERO_EXTRACT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_eee (ZERO_EXTRACT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_HIGH(MODE, ARG0) \ + gen_rtx_fmt_e (HIGH, (MODE), (ARG0)) +#define gen_rtx_LO_SUM(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (LO_SUM, (MODE), (ARG0), (ARG1)) +#define gen_rtx_VEC_MERGE(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_eee (VEC_MERGE, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_VEC_SELECT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (VEC_SELECT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_VEC_CONCAT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (VEC_CONCAT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_VEC_DUPLICATE(MODE, ARG0) \ + gen_rtx_fmt_e (VEC_DUPLICATE, (MODE), (ARG0)) +#define gen_rtx_VEC_SERIES(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (VEC_SERIES, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SS_PLUS(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SS_PLUS, (MODE), (ARG0), (ARG1)) +#define gen_rtx_US_PLUS(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (US_PLUS, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SS_MINUS(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SS_MINUS, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SS_NEG(MODE, ARG0) \ + gen_rtx_fmt_e (SS_NEG, (MODE), (ARG0)) +#define gen_rtx_US_NEG(MODE, ARG0) \ + gen_rtx_fmt_e (US_NEG, (MODE), (ARG0)) 
+#define gen_rtx_SS_ABS(MODE, ARG0) \ + gen_rtx_fmt_e (SS_ABS, (MODE), (ARG0)) +#define gen_rtx_SS_ASHIFT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (SS_ASHIFT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_US_ASHIFT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (US_ASHIFT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_US_MINUS(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ee (US_MINUS, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SS_TRUNCATE(MODE, ARG0) \ + gen_rtx_fmt_e (SS_TRUNCATE, (MODE), (ARG0)) +#define gen_rtx_US_TRUNCATE(MODE, ARG0) \ + gen_rtx_fmt_e (US_TRUNCATE, (MODE), (ARG0)) +#define gen_rtx_FMA(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_eee (FMA, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEBUG_IMPLICIT_PTR(MODE, ARG0) \ + gen_rtx_fmt_t (DEBUG_IMPLICIT_PTR, (MODE), (ARG0)) +#define gen_rtx_ENTRY_VALUE(MODE) \ + gen_rtx_fmt_0 (ENTRY_VALUE, (MODE)) +#define gen_rtx_DEBUG_PARAMETER_REF(MODE, ARG0) \ + gen_rtx_fmt_t (DEBUG_PARAMETER_REF, (MODE), (ARG0)) +#define gen_rtx_DEBUG_MARKER(MODE) \ + gen_rtx_fmt_ (DEBUG_MARKER, (MODE)) +#define gen_rtx_MATCH_OPERAND(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_iss (MATCH_OPERAND, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_MATCH_SCRATCH(MODE, ARG0, ARG1) \ + gen_rtx_fmt_is (MATCH_SCRATCH, (MODE), (ARG0), (ARG1)) +#define gen_rtx_MATCH_OPERATOR(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_isE (MATCH_OPERATOR, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_MATCH_PARALLEL(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_isE (MATCH_PARALLEL, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_MATCH_DUP(MODE, ARG0) \ + gen_rtx_fmt_i (MATCH_DUP, (MODE), (ARG0)) +#define gen_rtx_MATCH_OP_DUP(MODE, ARG0, ARG1) \ + gen_rtx_fmt_iE (MATCH_OP_DUP, (MODE), (ARG0), (ARG1)) +#define gen_rtx_MATCH_PAR_DUP(MODE, ARG0, ARG1) \ + gen_rtx_fmt_iE (MATCH_PAR_DUP, (MODE), (ARG0), (ARG1)) +#define gen_rtx_MATCH_CODE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (MATCH_CODE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_MATCH_TEST(MODE, ARG0) \ + gen_rtx_fmt_s (MATCH_TEST, (MODE), (ARG0)) +#define 
gen_rtx_DEFINE_DELAY(MODE, ARG0, ARG1) \ + gen_rtx_fmt_eE (DEFINE_DELAY, (MODE), (ARG0), (ARG1)) +#define gen_rtx_DEFINE_PREDICATE(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_ses (DEFINE_PREDICATE, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_SPECIAL_PREDICATE(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_ses (DEFINE_SPECIAL_PREDICATE, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_REGISTER_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sss (DEFINE_REGISTER_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_MEMORY_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_MEMORY_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_SPECIAL_MEMORY_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_SPECIAL_MEMORY_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_RELAXED_MEMORY_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_RELAXED_MEMORY_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_ADDRESS_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_ADDRESS_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_EXCLUSION_SET(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (EXCLUSION_SET, (MODE), (ARG0), (ARG1)) +#define gen_rtx_PRESENCE_SET(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (PRESENCE_SET, (MODE), (ARG0), (ARG1)) +#define gen_rtx_FINAL_PRESENCE_SET(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (FINAL_PRESENCE_SET, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ABSENCE_SET(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (ABSENCE_SET, (MODE), (ARG0), (ARG1)) +#define gen_rtx_FINAL_ABSENCE_SET(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (FINAL_ABSENCE_SET, (MODE), (ARG0), (ARG1)) +#define gen_rtx_DEFINE_AUTOMATON(MODE, ARG0) \ + gen_rtx_fmt_s (DEFINE_AUTOMATON, (MODE), (ARG0)) +#define gen_rtx_AUTOMATA_OPTION(MODE, ARG0) \ + gen_rtx_fmt_s (AUTOMATA_OPTION, 
(MODE), (ARG0)) +#define gen_rtx_DEFINE_RESERVATION(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (DEFINE_RESERVATION, (MODE), (ARG0), (ARG1)) +#define gen_rtx_DEFINE_INSN_RESERVATION(MODE, ARG0, ARG1, ARG2, ARG3) \ + gen_rtx_fmt_sies (DEFINE_INSN_RESERVATION, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) +#define gen_rtx_DEFINE_ATTR(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_ATTR, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_DEFINE_ENUM_ATTR(MODE, ARG0, ARG1, ARG2) \ + gen_rtx_fmt_sse (DEFINE_ENUM_ATTR, (MODE), (ARG0), (ARG1), (ARG2)) +#define gen_rtx_ATTR(MODE, ARG0) \ + gen_rtx_fmt_s (ATTR, (MODE), (ARG0)) +#define gen_rtx_SET_ATTR(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (SET_ATTR, (MODE), (ARG0), (ARG1)) +#define gen_rtx_SET_ATTR_ALTERNATIVE(MODE, ARG0, ARG1) \ + gen_rtx_fmt_sE (SET_ATTR_ALTERNATIVE, (MODE), (ARG0), (ARG1)) +#define gen_rtx_EQ_ATTR(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ss (EQ_ATTR, (MODE), (ARG0), (ARG1)) +#define gen_rtx_EQ_ATTR_ALT(MODE, ARG0, ARG1) \ + gen_rtx_fmt_ww (EQ_ATTR_ALT, (MODE), (ARG0), (ARG1)) +#define gen_rtx_ATTR_FLAG(MODE, ARG0) \ + gen_rtx_fmt_s (ATTR_FLAG, (MODE), (ARG0)) +#define gen_rtx_COND(MODE, ARG0, ARG1) \ + gen_rtx_fmt_Ee (COND, (MODE), (ARG0), (ARG1)) +#define gen_rtx_DEFINE_SUBST(MODE, ARG0, ARG1, ARG2, ARG3) \ + gen_rtx_fmt_sEsE (DEFINE_SUBST, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) +#define gen_rtx_DEFINE_SUBST_ATTR(MODE, ARG0, ARG1, ARG2, ARG3) \ + gen_rtx_fmt_ssss (DEFINE_SUBST_ATTR, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) + +#endif /* GCC_GENRTL_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gensupport.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gensupport.h new file mode 100644 index 0000000..9a0fd73 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gensupport.h @@ -0,0 +1,228 @@ +/* Declarations for rtx-reader support for gen* routines. + Copyright (C) 2000-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GENSUPPORT_H +#define GCC_GENSUPPORT_H + +#include "read-md.h" + +struct obstack; +extern struct obstack *rtl_obstack; + +/* Information about an .md define_* rtx. */ +class md_rtx_info { +public: + /* The rtx itself. */ + rtx def; + + /* The location of the first line of the rtx. */ + file_location loc; + + /* The unique number attached to the rtx. Currently all define_insns, + define_expands, define_splits, define_peepholes and define_peephole2s + share the same insn_code index space. */ + int index; +}; + +#define OPTAB_CL(name, pat, c, b, l) name, +#define OPTAB_CX(name, pat) +#define OPTAB_CD(name, pat) name, +#define OPTAB_NL(name, pat, c, b, s, l) name, +#define OPTAB_NC(name, pat, c) name, +#define OPTAB_NX(name, pat) +#define OPTAB_VL(name, pat, c, b, s, l) name, +#define OPTAB_VC(name, pat, c) name, +#define OPTAB_VX(name, pat) +#define OPTAB_DC(name, pat, c) name, +#define OPTAB_D(name, pat) name, + +/* Enumerates all optabs. */ +typedef enum optab_tag { + unknown_optab, +#include "optabs.def" + NUM_OPTABS +} optab; + +#undef OPTAB_CL +#undef OPTAB_CX +#undef OPTAB_CD +#undef OPTAB_NL +#undef OPTAB_NC +#undef OPTAB_NX +#undef OPTAB_VL +#undef OPTAB_VC +#undef OPTAB_VX +#undef OPTAB_DC +#undef OPTAB_D + +/* Describes one entry in optabs.def. */ +struct optab_def +{ + /* The name of the optab (e.g. "add_optab"). 
*/ + const char *name; + + /* The pattern that matching define_expands and define_insns have. + See the comment at the head of optabs.def for details. */ + const char *pattern; + + /* The initializers (in the form of C code) for the libcall_basename, + libcall_suffix and libcall_gen fields of (convert_)optab_libcall_d. */ + const char *base; + const char *suffix; + const char *libcall; + + /* The optab's enum value. */ + unsigned int op; + + /* The value returned by optab_to_code (OP). */ + enum rtx_code fcode; + + /* CODE if code_to_optab (CODE) should return OP, otherwise UNKNOWN. */ + enum rtx_code rcode; + + /* 1: conversion optabs with libcall data, + 2: conversion optabs without libcall data, + 3: non-conversion optabs with libcall data ("normal" and "overflow" + optabs in the optabs.def comment) + 4: non-conversion optabs without libcall data ("direct" optabs). */ + unsigned int kind; +}; + +extern optab_def optabs[]; +extern unsigned int num_optabs; + +/* Information about an instruction name that matches an optab pattern. */ +struct optab_pattern +{ + /* The name of the instruction. */ + const char *name; + + /* The matching optab. */ + unsigned int op; + + /* The optab modes. M2 is only significant for conversion optabs; + it is zero otherwise. */ + unsigned int m1, m2; + + /* An index that provides a lexicographical sort of (OP, M2, M1). + Used by genopinit.cc. */ + unsigned int sort_num; +}; + +extern rtx add_implicit_parallel (rtvec); +extern rtx_reader *init_rtx_reader_args_cb (int, const char **, + bool (*)(const char *)); +extern rtx_reader *init_rtx_reader_args (int, const char **); +extern bool read_md_rtx (md_rtx_info *); +extern unsigned int get_num_insn_codes (); + +/* Set this to 0 to disable automatic elision of insn patterns which + can never be used in this configuration. See genconditions.cc. + Must be set before calling init_md_reader. 
*/ +extern int insn_elision; + +/* Return the C test that says whether a definition rtx can be used, + or "" if it can be used unconditionally. */ +extern const char *get_c_test (rtx); + +/* If the C test passed as the argument can be evaluated at compile + time, return its truth value; else return -1. The test must have + appeared somewhere in the machine description when genconditions + was run. */ +extern int maybe_eval_c_test (const char *); + +/* Add an entry to the table of conditions. Used by genconditions and + by read-rtl.cc. */ +extern void add_c_test (const char *, int); + +/* This structure is used internally by gensupport.cc and genconditions.cc. */ +struct c_test +{ + const char *expr; + int value; +}; + +#ifdef __HASHTAB_H__ +extern hashval_t hash_c_test (const void *); +extern int cmp_c_test (const void *, const void *); +extern void traverse_c_tests (htab_trav, void *); +#endif + +/* Predicate handling: helper functions and data structures. */ + +struct pred_data +{ + struct pred_data *next; /* for iterating over the set of all preds */ + const char *name; /* predicate name */ + bool special; /* special handling of modes? 
*/ + + /* data used primarily by genpreds.cc */ + const char *c_block; /* C test block */ + rtx exp; /* RTL test expression */ + + /* data used primarily by genrecog.cc */ + enum rtx_code singleton; /* if pred takes only one code, that code */ + int num_codes; /* number of codes accepted */ + bool allows_non_lvalue; /* if pred allows non-lvalue expressions */ + bool allows_non_const; /* if pred allows non-const expressions */ + bool codes[NUM_RTX_CODE]; /* set of codes accepted */ +}; + +extern struct pred_data *first_predicate; +extern struct pred_data *lookup_predicate (const char *); +extern void add_predicate_code (struct pred_data *, enum rtx_code); +extern void add_predicate (struct pred_data *); + +#define FOR_ALL_PREDICATES(p) for (p = first_predicate; p; p = p->next) + +struct pattern_stats +{ + /* The largest match_operand, match_operator or match_parallel + number found. */ + int max_opno; + + /* The largest match_dup, match_op_dup or match_par_dup number found. */ + int max_dup_opno; + + /* The smallest and largest match_scratch number found. */ + int min_scratch_opno; + int max_scratch_opno; + + /* The number of times match_dup, match_op_dup or match_par_dup appears + in the pattern. */ + int num_dups; + + /* The number of rtx arguments to the generator function. */ + int num_generator_args; + + /* The number of rtx operands in an insn. */ + int num_insn_operands; + + /* The number of operand variables that are needed. 
*/ + int num_operand_vars; +}; + +extern void get_pattern_stats (struct pattern_stats *ranges, rtvec vec); +extern void compute_test_codes (rtx, file_location, char *); +extern file_location get_file_location (rtx); +extern const char *get_emit_function (rtx); +extern bool needs_barrier_p (rtx); +extern bool find_optab (optab_pattern *, const char *); + +#endif /* GCC_GENSUPPORT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ggc-internal.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ggc-internal.h new file mode 100644 index 0000000..a353de4 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ggc-internal.h @@ -0,0 +1,115 @@ +/* Garbage collection for the GNU compiler. Internal definitions + for ggc-*.c and stringpool.cc. + + Copyright (C) 2009-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GGC_INTERNAL_H +#define GCC_GGC_INTERNAL_H + + +/* Call ggc_set_mark on all the roots. */ +extern void ggc_mark_roots (void); + +/* Stringpool. */ + +/* Mark the entries in the string pool. */ +extern void ggc_mark_stringpool (void); + +/* Purge the entries in the string pool. */ +extern void ggc_purge_stringpool (void); + +/* Save and restore the string pool entries for PCH. 
*/ + +extern void gt_pch_save_stringpool (void); +extern void gt_pch_fixup_stringpool (void); +extern void gt_pch_restore_stringpool (void); + +/* PCH and GGC handling for strings, mostly trivial. */ +extern void gt_pch_p_S (void *, void *, gt_pointer_operator, void *); + +/* PCH. */ + +struct ggc_pch_data; + +/* Return a new ggc_pch_data structure. */ +extern struct ggc_pch_data *init_ggc_pch (void); + +/* The second parameter and third parameters give the address and size + of an object. Update the ggc_pch_data structure with as much of + that information as is necessary. The bool argument should be true + if the object is a string. */ +extern void ggc_pch_count_object (struct ggc_pch_data *, void *, size_t, bool); + +/* Return the total size of the data to be written to hold all + the objects previously passed to ggc_pch_count_object. */ +extern size_t ggc_pch_total_size (struct ggc_pch_data *); + +/* The objects, when read, will most likely be at the address + in the second parameter. */ +extern void ggc_pch_this_base (struct ggc_pch_data *, void *); + +/* Assuming that the objects really do end up at the address + passed to ggc_pch_this_base, return the address of this object. + The bool argument should be true if the object is a string. */ +extern char *ggc_pch_alloc_object (struct ggc_pch_data *, void *, size_t, bool); + +/* Write out any initial information required. */ +extern void ggc_pch_prepare_write (struct ggc_pch_data *, FILE *); + +/* Write out this object, including any padding. The last argument should be + true if the object is a string. */ +extern void ggc_pch_write_object (struct ggc_pch_data *, FILE *, void *, + void *, size_t, bool); + +/* All objects have been written, write out any final information + required. */ +extern void ggc_pch_finish (struct ggc_pch_data *, FILE *); + +/* A PCH file has just been read in at the address specified second + parameter. Set up the GC implementation for the new objects. 
*/ +extern void ggc_pch_read (FILE *, void *); + + +/* Allocation and collection. */ + +extern void ggc_record_overhead (size_t, size_t, void * FINAL_MEM_STAT_DECL); + +extern void ggc_free_overhead (void *); + +extern void ggc_prune_overhead_list (void); + +/* Return the number of bytes allocated at the indicated address. */ +extern size_t ggc_get_size (const void *); + + +/* Statistics. */ + +/* This structure contains the statistics common to all collectors. + Particular collectors can extend this structure. */ +struct ggc_statistics +{ + /* At present, we don't really gather any interesting statistics. */ + int unused; +}; + +/* Used by the various collectors to gather and print statistics that + do not depend on the collector in use. */ +extern void ggc_print_common_statistics (FILE *, ggc_statistics *); + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ggc.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ggc.h new file mode 100644 index 0000000..aeec1ba --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/ggc.h @@ -0,0 +1,369 @@ +/* Garbage collection for the GNU compiler. + + Copyright (C) 1998-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GGC_H +#define GCC_GGC_H + +/* Symbols are marked with `ggc' for `gcc gc' so as not to interfere with + an external gc library that might be linked in. 
*/ + +/* Internal functions and data structures used by the GTY + machinery, including the generated gt*.[hc] files. */ + +#include "gtype-desc.h" + +/* One of these applies its third parameter (with cookie in the fourth + parameter) to each pointer in the object pointed to by the first + parameter, using the second parameter. */ +typedef void (*gt_note_pointers) (void *, void *, gt_pointer_operator, + void *); + +/* One of these is called before objects are re-ordered in memory. + The first parameter is the original object, the second is the + subobject that has had its pointers reordered, the third parameter + can compute the new values of a pointer when given the cookie in + the fourth parameter. */ +typedef void (*gt_handle_reorder) (void *, void *, gt_pointer_operator, + void *); + +/* Used by the gt_pch_n_* routines. Register an object in the hash table. */ +extern int gt_pch_note_object (void *, void *, gt_note_pointers); + +/* Used by the gt_pch_p_* routines. Register address of a callback + pointer. */ +extern void gt_pch_note_callback (void *, void *); + +/* Used by the gt_pch_n_* routines. Register that an object has a reorder + function. */ +extern void gt_pch_note_reorder (void *, void *, gt_handle_reorder); + +/* generated function to clear caches in gc memory. */ +extern void gt_clear_caches (); + +/* Mark the object in the first parameter and anything it points to. */ +typedef void (*gt_pointer_walker) (void *); + +/* Structures for the easy way to mark roots. + In an array, terminated by having base == NULL. */ +struct ggc_root_tab { + void *base; + size_t nelt; + size_t stride; + gt_pointer_walker cb; + gt_pointer_walker pchw; +}; +#define LAST_GGC_ROOT_TAB { NULL, 0, 0, NULL, NULL } +/* Pointers to arrays of ggc_root_tab, terminated by NULL. 
*/ +extern const struct ggc_root_tab * const gt_ggc_rtab[]; +extern const struct ggc_root_tab * const gt_ggc_deletable_rtab[]; +extern const struct ggc_root_tab * const gt_pch_scalar_rtab[]; + +/* If EXPR is not NULL and previously unmarked, mark it and evaluate + to true. Otherwise evaluate to false. */ +#define ggc_test_and_set_mark(EXPR) \ + ((EXPR) != NULL && ((void *) (EXPR)) != (void *) 1 && ! ggc_set_mark (EXPR)) + +#define ggc_mark(EXPR) \ + do { \ + const void *const a__ = (EXPR); \ + if (a__ != NULL && a__ != (void *) 1) \ + ggc_set_mark (a__); \ + } while (0) + +/* Actually set the mark on a particular region of memory, but don't + follow pointers. This function is called by ggc_mark_*. It + returns zero if the object was not previously marked; nonzero if + the object was already marked, or if, for any other reason, + pointers in this data structure should not be traversed. */ +extern int ggc_set_mark (const void *); + +/* Return 1 if P has been marked, zero otherwise. + P must have been allocated by the GC allocator; it mustn't point to + static objects, stack variables, or memory allocated with malloc. */ +extern int ggc_marked_p (const void *); + +/* PCH and GGC handling for strings, mostly trivial. */ +extern void gt_pch_n_S (const void *); +extern void gt_ggc_m_S (const void *); + +/* End of GTY machinery API. */ + +/* Initialize the string pool. */ +extern void init_stringpool (void); + +/* Initialize the garbage collector. */ +extern void init_ggc (void); + +/* When true, identifier nodes are considered as GC roots. When + false, identifier nodes are treated like any other GC-allocated + object, and the identifier hash table is treated as a weak + hash. */ +extern bool ggc_protect_identifiers; + +/* Write out all GCed objects to F. */ +extern void gt_pch_save (FILE *f); + + +/* Allocation. */ + +/* The internal primitive. 
*/ +extern void *ggc_internal_alloc (size_t, void (*)(void *), size_t, + size_t CXX_MEM_STAT_INFO) + ATTRIBUTE_MALLOC; + +inline void * +ggc_internal_alloc (size_t s CXX_MEM_STAT_INFO) +{ + return ggc_internal_alloc (s, NULL, 0, 1 PASS_MEM_STAT); +} + +extern size_t ggc_round_alloc_size (size_t requested_size); + +/* Allocates cleared memory. */ +extern void *ggc_internal_cleared_alloc (size_t, void (*)(void *), + size_t, size_t + CXX_MEM_STAT_INFO) ATTRIBUTE_MALLOC; + +inline void * +ggc_internal_cleared_alloc (size_t s CXX_MEM_STAT_INFO) +{ + return ggc_internal_cleared_alloc (s, NULL, 0, 1 PASS_MEM_STAT); +} + +/* Resize a block. */ +extern void *ggc_realloc (void *, size_t CXX_MEM_STAT_INFO); + +/* Free a block. To be used when known for certain it's not reachable. */ +extern void ggc_free (void *); + +extern void dump_ggc_loc_statistics (); + +/* Reallocator. */ +#define GGC_RESIZEVEC(T, P, N) \ + ((T *) ggc_realloc ((P), (N) * sizeof (T) MEM_STAT_INFO)) + +template +void +finalize (void *p) +{ + static_cast (p)->~T (); +} + +template +inline bool +need_finalization_p () +{ +#if GCC_VERSION >= 4003 + return !__has_trivial_destructor (T); +#else + return true; +#endif +} + +template +inline T * +ggc_alloc (ALONE_CXX_MEM_STAT_INFO) +{ + if (need_finalization_p ()) + return static_cast (ggc_internal_alloc (sizeof (T), finalize, 0, 1 + PASS_MEM_STAT)); + else + return static_cast (ggc_internal_alloc (sizeof (T), NULL, 0, 1 + PASS_MEM_STAT)); +} + +/* GGC allocation function that does not call finalizer for type + that have need_finalization_p equal to true. User is responsible + for calling of the destructor. 
*/ + +template +inline T * +ggc_alloc_no_dtor (ALONE_CXX_MEM_STAT_INFO) +{ + return static_cast (ggc_internal_alloc (sizeof (T), NULL, 0, 1 + PASS_MEM_STAT)); +} + +template +inline T * +ggc_cleared_alloc (ALONE_CXX_MEM_STAT_INFO) +{ + if (need_finalization_p ()) + return static_cast (ggc_internal_cleared_alloc (sizeof (T), + finalize, 0, 1 + PASS_MEM_STAT)); + else + return static_cast (ggc_internal_cleared_alloc (sizeof (T), NULL, 0, 1 + PASS_MEM_STAT)); +} + +template +inline T * +ggc_vec_alloc (size_t c CXX_MEM_STAT_INFO) +{ + if (need_finalization_p ()) + return static_cast (ggc_internal_alloc (c * sizeof (T), finalize, + sizeof (T), c PASS_MEM_STAT)); + else + return static_cast (ggc_internal_alloc (c * sizeof (T), NULL, 0, 0 + PASS_MEM_STAT)); +} + +template +inline T * +ggc_cleared_vec_alloc (size_t c CXX_MEM_STAT_INFO) +{ + if (need_finalization_p ()) + return static_cast (ggc_internal_cleared_alloc (c * sizeof (T), + finalize, + sizeof (T), c + PASS_MEM_STAT)); + else + return static_cast (ggc_internal_cleared_alloc (c * sizeof (T), NULL, + 0, 0 PASS_MEM_STAT)); +} + +inline void * +ggc_alloc_atomic (size_t s CXX_MEM_STAT_INFO) +{ + return ggc_internal_alloc (s PASS_MEM_STAT); +} + +/* Call destructor and free the garbage collected memory. */ + +template +inline void +ggc_delete (T *ptr) +{ + ptr->~T (); + ggc_free (ptr); +} + +/* Allocate a gc-able string, and fill it with LENGTH bytes from CONTENTS. + If LENGTH is -1, then CONTENTS is assumed to be a + null-terminated string and the memory sized accordingly. */ +extern const char *ggc_alloc_string (const char *contents, int length + CXX_MEM_STAT_INFO); + +/* Make a copy of S, in GC-able memory. */ +#define ggc_strdup(S) ggc_alloc_string ((S), -1 MEM_STAT_INFO) + +/* Invoke the collector. Garbage collection occurs only when this + function is called, not during allocations. 
*/ +enum ggc_collect { + GGC_COLLECT_HEURISTIC, + GGC_COLLECT_FORCE +}; +extern void ggc_collect (enum ggc_collect mode = GGC_COLLECT_HEURISTIC); + +/* Return unused memory pages to the system. */ +extern void ggc_trim (void); + +/* Assume that all GGC memory is reachable and grow the limits for next collection. */ +extern void ggc_grow (void); + +/* Register an additional root table. This can be useful for some + plugins. Does nothing if the passed pointer is NULL. */ +extern void ggc_register_root_tab (const struct ggc_root_tab *); + +/* Read objects previously saved with gt_pch_save from F. */ +extern void gt_pch_restore (FILE *f); + +/* Statistics. */ + +/* Print allocation statistics. */ +extern void ggc_print_statistics (void); + +extern void stringpool_statistics (void); + +/* Heuristics. */ +extern void init_ggc_heuristics (void); + +/* Report current heap memory use to stderr. */ +extern void report_heap_memory_use (void); + +#define ggc_alloc_rtvec_sized(NELT) \ + (rtvec_def *) ggc_internal_alloc (sizeof (struct rtvec_def) \ + + ((NELT) - 1) * sizeof (rtx)) \ + +/* Memory statistics passing versions of some allocators. Too few of them to + make gengtype produce them, so just define the needed ones here. 
*/ +inline struct rtx_def * +ggc_alloc_rtx_def_stat (size_t s CXX_MEM_STAT_INFO) +{ + return (struct rtx_def *) ggc_internal_alloc (s PASS_MEM_STAT); +} + +inline union tree_node * +ggc_alloc_tree_node_stat (size_t s CXX_MEM_STAT_INFO) +{ + return (union tree_node *) ggc_internal_alloc (s PASS_MEM_STAT); +} + +inline union tree_node * +ggc_alloc_cleared_tree_node_stat (size_t s CXX_MEM_STAT_INFO) +{ + return (union tree_node *) ggc_internal_cleared_alloc (s PASS_MEM_STAT); +} + +inline gimple * +ggc_alloc_cleared_gimple_statement_stat (size_t s CXX_MEM_STAT_INFO) +{ + return (gimple *) ggc_internal_cleared_alloc (s PASS_MEM_STAT); +} + +inline void +gt_ggc_mx (const char *s) +{ + ggc_test_and_set_mark (const_cast (s)); +} + +inline void +gt_pch_nx (const char *) +{ +} + +inline void gt_pch_nx (bool) { } +inline void gt_pch_nx (char) { } +inline void gt_pch_nx (signed char) { } +inline void gt_pch_nx (unsigned char) { } +inline void gt_pch_nx (short) { } +inline void gt_pch_nx (unsigned short) { } +inline void gt_pch_nx (int) { } +inline void gt_pch_nx (unsigned int) { } +inline void gt_pch_nx (long int) { } +inline void gt_pch_nx (unsigned long int) { } +inline void gt_pch_nx (long long int) { } +inline void gt_pch_nx (unsigned long long int) { } + +inline void gt_ggc_mx (bool) { } +inline void gt_ggc_mx (char) { } +inline void gt_ggc_mx (signed char) { } +inline void gt_ggc_mx (unsigned char) { } +inline void gt_ggc_mx (short) { } +inline void gt_ggc_mx (unsigned short) { } +inline void gt_ggc_mx (int) { } +inline void gt_ggc_mx (unsigned int) { } +inline void gt_ggc_mx (long int) { } +inline void gt_ggc_mx (unsigned long int) { } +inline void gt_ggc_mx (long long int) { } +inline void gt_ggc_mx (unsigned long long int) { } + +#endif diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-array-bounds.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-array-bounds.h new file mode 100644 index 0000000..eb39927 --- /dev/null +++ 
b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-array-bounds.h @@ -0,0 +1,49 @@ +/* Array bounds checking. + Copyright (C) 2020-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_ARRAY_BOUNDS_H +#define GCC_GIMPLE_ARRAY_BOUNDS_H + +#include "pointer-query.h" + +class array_bounds_checker +{ + friend class check_array_bounds_dom_walker; + +public: + array_bounds_checker (struct function *, range_query *); + void check (); + +private: + static tree check_array_bounds (tree *tp, int *walk_subtree, void *data); + bool check_array_ref (location_t, tree, gimple *, bool ignore_off_by_one); + bool check_mem_ref (location_t, tree, bool ignore_off_by_one); + void check_addr_expr (location_t, tree, gimple *); + const value_range *get_value_range (const_tree op, gimple *); + + /* Current function. */ + struct function *fun; + /* A pointer_query object to store information about pointers and + their targets in. */ + pointer_query m_ptr_qry; + /* Current statement. 
*/ + gimple *m_stmt; +}; + +#endif // GCC_GIMPLE_ARRAY_BOUNDS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-builder.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-builder.h new file mode 100644 index 0000000..4546c50 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-builder.h @@ -0,0 +1,36 @@ +/* Header file for high level statement building routines. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +#ifndef GCC_GIMPLE_BUILDER_H +#define GCC_GIMPLE_BUILDER_H + +/* ??? This API is legacy and should not be used in new code. 
*/ + +gassign *build_assign (enum tree_code, tree, int, tree lhs = NULL_TREE); +gassign *build_assign (enum tree_code, gimple *, int, tree lhs = NULL_TREE); +gassign *build_assign (enum tree_code, tree, tree, tree lhs = NULL_TREE); +gassign *build_assign (enum tree_code, gimple *, tree, tree lhs = NULL_TREE); +gassign *build_assign (enum tree_code, tree, gimple *, tree lhs = NULL_TREE); +gassign *build_assign (enum tree_code, gimple *, gimple *, + tree lhs = NULL_TREE); +gassign *build_type_cast (tree, tree, tree lhs = NULL_TREE); +gassign *build_type_cast (tree, gimple *, tree lhs = NULL_TREE); + +#endif /* GCC_GIMPLE_BUILDER_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-expr.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-expr.h new file mode 100644 index 0000000..0697126 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-expr.h @@ -0,0 +1,179 @@ +/* Header file for gimple decl, type and expressions. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_GIMPLE_EXPR_H +#define GCC_GIMPLE_EXPR_H + +extern bool useless_type_conversion_p (tree, tree); + + +extern void gimple_set_body (tree, gimple_seq); +extern gimple_seq gimple_body (tree); +extern bool gimple_has_body_p (tree); +extern const char *gimple_decl_printable_name (tree, int); +extern tree copy_var_decl (tree, tree, tree); +extern tree create_tmp_var_name (const char *); +extern tree create_tmp_var_raw (tree, const char * = NULL); +extern tree create_tmp_var (tree, const char * = NULL); +extern tree create_tmp_reg (tree, const char * = NULL); +extern tree create_tmp_reg_fn (struct function *, tree, const char *); + + +extern void extract_ops_from_tree (tree, enum tree_code *, tree *, tree *, + tree *); +extern void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, + tree *); +extern bool is_gimple_lvalue (tree); +extern bool is_gimple_condexpr (tree); +extern bool is_gimple_condexpr_for_cond (tree); +extern bool is_gimple_address (const_tree); +extern bool is_gimple_invariant_address (const_tree); +extern bool is_gimple_ip_invariant_address (const_tree); +extern bool is_gimple_min_invariant (const_tree); +extern bool is_gimple_ip_invariant (const_tree); +extern bool is_gimple_reg (tree); +extern bool is_gimple_val (tree); +extern bool is_gimple_asm_val (tree); +extern bool is_gimple_min_lval (tree); +extern bool is_gimple_call_addr (tree); +extern bool is_gimple_mem_ref_addr (tree); +extern void flush_mark_addressable_queue (void); +extern void mark_addressable (tree); +extern bool is_gimple_reg_rhs (tree); + +/* Return true if a conversion from either type of TYPE1 and TYPE2 + to the other is not required. Otherwise return false. */ + +static inline bool +types_compatible_p (tree type1, tree type2) +{ + return (type1 == type2 + || (useless_type_conversion_p (type1, type2) + && useless_type_conversion_p (type2, type1))); +} + +/* Return true if TYPE is a suitable type for a scalar register variable. 
*/ + +static inline bool +is_gimple_reg_type (tree type) +{ + return !AGGREGATE_TYPE_P (type); +} + +/* Return true if T is a variable. */ + +static inline bool +is_gimple_variable (tree t) +{ + return (TREE_CODE (t) == VAR_DECL + || TREE_CODE (t) == PARM_DECL + || TREE_CODE (t) == RESULT_DECL + || TREE_CODE (t) == SSA_NAME); +} + +/* Return true if T is a GIMPLE identifier (something with an address). */ + +static inline bool +is_gimple_id (tree t) +{ + return (is_gimple_variable (t) + || TREE_CODE (t) == FUNCTION_DECL + || TREE_CODE (t) == LABEL_DECL + || TREE_CODE (t) == CONST_DECL + /* Allow string constants, since they are addressable. */ + || TREE_CODE (t) == STRING_CST); +} + +/* Return true if OP, an SSA name or a DECL is a virtual operand. */ + +static inline bool +virtual_operand_p (tree op) +{ + if (TREE_CODE (op) == SSA_NAME) + return SSA_NAME_IS_VIRTUAL_OPERAND (op); + + if (TREE_CODE (op) == VAR_DECL) + return VAR_DECL_IS_VIRTUAL_OPERAND (op); + + return false; +} + +/* Return true if T is something whose address can be taken. */ + +static inline bool +is_gimple_addressable (tree t) +{ + return (is_gimple_id (t) || handled_component_p (t) + || TREE_CODE (t) == TARGET_MEM_REF + || TREE_CODE (t) == MEM_REF); +} + +/* Return true if T is a valid gimple constant. */ + +static inline bool +is_gimple_constant (const_tree t) +{ + switch (TREE_CODE (t)) + { + case INTEGER_CST: + case POLY_INT_CST: + case REAL_CST: + case FIXED_CST: + case COMPLEX_CST: + case VECTOR_CST: + case STRING_CST: + return true; + + default: + return false; + } +} + +/* A wrapper around extract_ops_from_tree with 3 ops, for callers which + expect to see only a maximum of two operands. 
*/ + +static inline void +extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0, + tree *op1) +{ + tree op2; + extract_ops_from_tree (expr, code, op0, op1, &op2); + gcc_assert (op2 == NULL_TREE); +} + +/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL + associated with the callee if known. Otherwise return NULL_TREE. */ + +static inline tree +gimple_call_addr_fndecl (const_tree fn) +{ + if (fn && TREE_CODE (fn) == ADDR_EXPR) + { + tree fndecl = TREE_OPERAND (fn, 0); + if (TREE_CODE (fndecl) == MEM_REF + && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR + && integer_zerop (TREE_OPERAND (fndecl, 1))) + fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0); + if (TREE_CODE (fndecl) == FUNCTION_DECL) + return fndecl; + } + return NULL_TREE; +} + +#endif /* GCC_GIMPLE_EXPR_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-fold.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-fold.h new file mode 100644 index 0000000..3a0ef54 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-fold.h @@ -0,0 +1,184 @@ +/* Gimple folding definitions. + + Copyright (C) 2011-2022 Free Software Foundation, Inc. + Contributed by Richard Guenther + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_GIMPLE_FOLD_H +#define GCC_GIMPLE_FOLD_H + +extern tree create_tmp_reg_or_ssa_name (tree, gimple *stmt = NULL); +extern tree canonicalize_constructor_val (tree, tree); +extern tree get_symbol_constant_value (tree); +struct c_strlen_data; +extern bool get_range_strlen (tree, c_strlen_data *, unsigned eltsize); +extern void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree); +extern bool update_gimple_call (gimple_stmt_iterator *, tree, int, ...); +extern bool fold_stmt (gimple_stmt_iterator *); +extern bool fold_stmt (gimple_stmt_iterator *, tree (*) (tree)); +extern bool fold_stmt_inplace (gimple_stmt_iterator *); +extern tree maybe_fold_and_comparisons (tree, enum tree_code, tree, tree, + enum tree_code, tree, tree, + basic_block = nullptr); +extern tree maybe_fold_or_comparisons (tree, enum tree_code, tree, tree, + enum tree_code, tree, tree, + basic_block = nullptr); +extern bool clear_padding_type_may_have_padding_p (tree); +extern void clear_type_padding_in_mask (tree, unsigned char *); +extern bool optimize_atomic_compare_exchange_p (gimple *); +extern void fold_builtin_atomic_compare_exchange (gimple_stmt_iterator *); +extern bool arith_overflowed_p (enum tree_code, const_tree, const_tree, + const_tree); +extern tree no_follow_ssa_edges (tree); +extern tree follow_single_use_edges (tree); +extern tree follow_all_ssa_edges (tree); +extern tree gimple_fold_stmt_to_constant_1 (gimple *, tree (*) (tree), + tree (*) (tree) = no_follow_ssa_edges); +extern tree gimple_fold_stmt_to_constant (gimple *, tree (*) (tree)); +extern tree fold_ctor_reference (tree, tree, const poly_uint64&, + const poly_uint64&, tree, + unsigned HOST_WIDE_INT * = NULL); +extern tree fold_const_aggregate_ref_1 (tree, tree (*) (tree)); +extern tree fold_const_aggregate_ref (tree); +extern tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree, + bool *can_refer = NULL); +extern tree gimple_get_virt_method_for_vtable (HOST_WIDE_INT, tree, + unsigned 
HOST_WIDE_INT, + bool *can_refer = NULL); +extern tree gimple_fold_indirect_ref (tree); +extern bool gimple_fold_builtin_sprintf (gimple_stmt_iterator *); +extern bool gimple_fold_builtin_snprintf (gimple_stmt_iterator *); +extern bool arith_code_with_undefined_signed_overflow (tree_code); +extern gimple_seq rewrite_to_defined_overflow (gimple *, bool = false); +extern void replace_call_with_value (gimple_stmt_iterator *, tree); +extern tree tree_vec_extract (gimple_stmt_iterator *, tree, tree, tree, tree); + +/* gimple_build, functionally matching fold_buildN, outputs stmts + int the provided sequence, matching and simplifying them on-the-fly. + Supposed to replace force_gimple_operand (fold_buildN (...), ...). */ +extern tree gimple_build (gimple_seq *, location_t, + enum tree_code, tree, tree); +inline tree +gimple_build (gimple_seq *seq, + enum tree_code code, tree type, tree op0) +{ + return gimple_build (seq, UNKNOWN_LOCATION, code, type, op0); +} +extern tree gimple_build (gimple_seq *, location_t, + enum tree_code, tree, tree, tree); +inline tree +gimple_build (gimple_seq *seq, + enum tree_code code, tree type, tree op0, tree op1) +{ + return gimple_build (seq, UNKNOWN_LOCATION, code, type, op0, op1); +} +extern tree gimple_build (gimple_seq *, location_t, + enum tree_code, tree, tree, tree, tree); +inline tree +gimple_build (gimple_seq *seq, + enum tree_code code, tree type, tree op0, tree op1, tree op2) +{ + return gimple_build (seq, UNKNOWN_LOCATION, code, type, op0, op1, op2); +} +extern tree gimple_build (gimple_seq *, location_t, combined_fn, tree); +inline tree +gimple_build (gimple_seq *seq, combined_fn fn, tree type) +{ + return gimple_build (seq, UNKNOWN_LOCATION, fn, type); +} +extern tree gimple_build (gimple_seq *, location_t, combined_fn, tree, tree); +inline tree +gimple_build (gimple_seq *seq, combined_fn fn, tree type, tree arg0) +{ + return gimple_build (seq, UNKNOWN_LOCATION, fn, type, arg0); +} +extern tree gimple_build (gimple_seq *, 
location_t, combined_fn, + tree, tree, tree); +inline tree +gimple_build (gimple_seq *seq, combined_fn fn, + tree type, tree arg0, tree arg1) +{ + return gimple_build (seq, UNKNOWN_LOCATION, fn, type, arg0, arg1); +} +extern tree gimple_build (gimple_seq *, location_t, combined_fn, + tree, tree, tree, tree); +inline tree +gimple_build (gimple_seq *seq, combined_fn fn, + tree type, tree arg0, tree arg1, tree arg2) +{ + return gimple_build (seq, UNKNOWN_LOCATION, fn, type, arg0, arg1, arg2); +} + +extern tree gimple_convert (gimple_seq *, location_t, tree, tree); +inline tree +gimple_convert (gimple_seq *seq, tree type, tree op) +{ + return gimple_convert (seq, UNKNOWN_LOCATION, type, op); +} + +extern tree gimple_convert_to_ptrofftype (gimple_seq *, location_t, tree); +inline tree +gimple_convert_to_ptrofftype (gimple_seq *seq, tree op) +{ + return gimple_convert_to_ptrofftype (seq, UNKNOWN_LOCATION, op); +} + +extern tree gimple_build_vector_from_val (gimple_seq *, location_t, tree, + tree); +inline tree +gimple_build_vector_from_val (gimple_seq *seq, tree type, tree op) +{ + return gimple_build_vector_from_val (seq, UNKNOWN_LOCATION, type, op); +} + +class tree_vector_builder; +extern tree gimple_build_vector (gimple_seq *, location_t, + tree_vector_builder *); +inline tree +gimple_build_vector (gimple_seq *seq, tree_vector_builder *builder) +{ + return gimple_build_vector (seq, UNKNOWN_LOCATION, builder); +} + +extern tree gimple_build_round_up (gimple_seq *, location_t, tree, tree, + unsigned HOST_WIDE_INT); +inline tree +gimple_build_round_up (gimple_seq *seq, tree type, tree old_size, + unsigned HOST_WIDE_INT align) +{ + return gimple_build_round_up (seq, UNKNOWN_LOCATION, type, old_size, align); +} + +extern bool gimple_stmt_nonnegative_warnv_p (gimple *, bool *, int = 0); +extern bool gimple_stmt_integer_valued_real_p (gimple *, int = 0); + +/* In gimple-match.cc. 
*/ +extern tree gimple_simplify (enum tree_code, tree, tree, + gimple_seq *, tree (*)(tree)); +extern tree gimple_simplify (enum tree_code, tree, tree, tree, + gimple_seq *, tree (*)(tree)); +extern tree gimple_simplify (enum tree_code, tree, tree, tree, tree, + gimple_seq *, tree (*)(tree)); +extern tree gimple_simplify (combined_fn, tree, tree, + gimple_seq *, tree (*)(tree)); +extern tree gimple_simplify (combined_fn, tree, tree, tree, + gimple_seq *, tree (*)(tree)); +extern tree gimple_simplify (combined_fn, tree, tree, tree, tree, + gimple_seq *, tree (*)(tree)); + +#endif /* GCC_GIMPLE_FOLD_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-iterator.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-iterator.h new file mode 100644 index 0000000..216ebee --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-iterator.h @@ -0,0 +1,400 @@ +/* Header file for gimple iterators. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_ITERATOR_H +#define GCC_GIMPLE_ITERATOR_H + +/* Iterator object for GIMPLE statement sequences. */ + +struct gimple_stmt_iterator +{ + /* Sequence node holding the current statement. */ + gimple_seq_node ptr; + + /* Sequence and basic block holding the statement. 
These fields + are necessary to handle edge cases such as when statement is + added to an empty basic block or when the last statement of a + block/sequence is removed. */ + gimple_seq *seq; + basic_block bb; +}; + +/* Iterator over GIMPLE_PHI statements. */ +struct gphi_iterator : public gimple_stmt_iterator +{ + gphi *phi () const + { + return as_a (ptr); + } +}; + +enum gsi_iterator_update +{ + GSI_NEW_STMT = 2, /* Move the iterator to the first statement added. */ + GSI_LAST_NEW_STMT, /* Move the iterator to the last statement added. */ + GSI_SAME_STMT, /* Leave the iterator at the same statement. */ + GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable + for linking other statements in the same + direction. */ +}; + +extern void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, + gimple_seq, + enum gsi_iterator_update); +extern void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq, + enum gsi_iterator_update); +extern void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, + gimple_seq, + enum gsi_iterator_update); +extern void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq, + enum gsi_iterator_update); +extern gimple_seq gsi_split_seq_after (gimple_stmt_iterator); +extern void gsi_set_stmt (gimple_stmt_iterator *, gimple *); +extern void gsi_split_seq_before (gimple_stmt_iterator *, gimple_seq *); +extern bool gsi_replace (gimple_stmt_iterator *, gimple *, bool); +extern void gsi_replace_with_seq (gimple_stmt_iterator *, gimple_seq, bool); +extern void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple *, + enum gsi_iterator_update); +extern void gsi_insert_before (gimple_stmt_iterator *, gimple *, + enum gsi_iterator_update); +extern void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple *, + enum gsi_iterator_update); +extern void gsi_insert_after (gimple_stmt_iterator *, gimple *, + enum gsi_iterator_update); +extern bool gsi_remove (gimple_stmt_iterator *, bool); 
+extern gimple_stmt_iterator gsi_for_stmt (gimple *); +extern gimple_stmt_iterator gsi_for_stmt (gimple *, gimple_seq *); +extern gphi_iterator gsi_for_phi (gphi *); +extern void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *); +extern void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *); +extern void gsi_move_to_bb_end (gimple_stmt_iterator *, basic_block); +extern void gsi_insert_on_edge (edge, gimple *); +extern void gsi_insert_seq_on_edge (edge, gimple_seq); +extern basic_block gsi_insert_on_edge_immediate (edge, gimple *); +extern basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq); +extern void gsi_commit_edge_inserts (void); +extern void gsi_commit_one_edge_insert (edge, basic_block *); +extern gphi_iterator gsi_start_phis (basic_block); +extern void update_modified_stmts (gimple_seq); + +/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */ + +static inline gimple_stmt_iterator +gsi_start_1 (gimple_seq *seq) +{ + gimple_stmt_iterator i; + + i.ptr = gimple_seq_first (*seq); + i.seq = seq; + i.bb = i.ptr ? gimple_bb (i.ptr) : NULL; + + return i; +} + +#define gsi_start(x) gsi_start_1 (&(x)) + +static inline gimple_stmt_iterator +gsi_none (void) +{ + gimple_stmt_iterator i; + i.ptr = NULL; + i.seq = NULL; + i.bb = NULL; + return i; +} + +/* Return a new iterator pointing to the first statement in basic block BB. */ + +static inline gimple_stmt_iterator +gsi_start_bb (basic_block bb) +{ + gimple_stmt_iterator i; + gimple_seq *seq; + + seq = bb_seq_addr (bb); + i.ptr = gimple_seq_first (*seq); + i.seq = seq; + i.bb = bb; + + return i; +} + +gimple_stmt_iterator gsi_start_edge (edge e); + +/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */ + +static inline gimple_stmt_iterator +gsi_last_1 (gimple_seq *seq) +{ + gimple_stmt_iterator i; + + i.ptr = gimple_seq_last (*seq); + i.seq = seq; + i.bb = i.ptr ? 
gimple_bb (i.ptr) : NULL; + + return i; +} + +#define gsi_last(x) gsi_last_1 (&(x)) + +/* Return a new iterator pointing to the last statement in basic block BB. */ + +static inline gimple_stmt_iterator +gsi_last_bb (basic_block bb) +{ + gimple_stmt_iterator i; + gimple_seq *seq; + + seq = bb_seq_addr (bb); + i.ptr = gimple_seq_last (*seq); + i.seq = seq; + i.bb = bb; + + return i; +} + +/* Return true if I is at the end of its sequence. */ + +static inline bool +gsi_end_p (gimple_stmt_iterator i) +{ + return i.ptr == NULL; +} + +/* Return true if I is one statement before the end of its sequence. */ + +static inline bool +gsi_one_before_end_p (gimple_stmt_iterator i) +{ + return i.ptr != NULL && i.ptr->next == NULL; +} + +/* Advance the iterator to the next gimple statement. */ + +static inline void +gsi_next (gimple_stmt_iterator *i) +{ + i->ptr = i->ptr->next; +} + +/* Advance the iterator to the previous gimple statement. */ + +static inline void +gsi_prev (gimple_stmt_iterator *i) +{ + gimple *prev = i->ptr->prev; + if (prev->next) + i->ptr = prev; + else + i->ptr = NULL; +} + +/* Return the current stmt. */ + +static inline gimple * +gsi_stmt (gimple_stmt_iterator i) +{ + return i.ptr; +} + +/* Return a block statement iterator that points to the first + non-label statement in block BB. */ + +static inline gimple_stmt_iterator +gsi_after_labels (basic_block bb) +{ + gimple_stmt_iterator gsi = gsi_start_bb (bb); + + for (; !gsi_end_p (gsi); ) + { + if (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL) + gsi_next (&gsi); + else + break; + } + + return gsi; +} + +/* Advance the iterator to the next non-debug gimple statement. */ + +static inline void +gsi_next_nondebug (gimple_stmt_iterator *i) +{ + do + { + gsi_next (i); + } + while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); +} + +/* Advance the iterator to the previous non-debug gimple statement. 
*/ + +static inline void +gsi_prev_nondebug (gimple_stmt_iterator *i) +{ + do + { + gsi_prev (i); + } + while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); +} + +/* Return a new iterator pointing to the first non-debug statement in + SEQ. */ + +static inline gimple_stmt_iterator +gsi_start_nondebug (gimple_seq seq) +{ + gimple_stmt_iterator gsi = gsi_start (seq); + if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi))) + gsi_next_nondebug (&gsi); + + return gsi; +} + +/* Return a new iterator pointing to the first non-debug statement in + basic block BB. */ + +static inline gimple_stmt_iterator +gsi_start_nondebug_bb (basic_block bb) +{ + gimple_stmt_iterator i = gsi_start_bb (bb); + + if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) + gsi_next_nondebug (&i); + + return i; +} + +/* Return a new iterator pointing to the first non-debug non-label statement in + basic block BB. */ + +static inline gimple_stmt_iterator +gsi_start_nondebug_after_labels_bb (basic_block bb) +{ + gimple_stmt_iterator i = gsi_after_labels (bb); + + if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) + gsi_next_nondebug (&i); + + return i; +} + +/* Return a new iterator pointing to the last non-debug statement in + basic block BB. */ + +static inline gimple_stmt_iterator +gsi_last_nondebug_bb (basic_block bb) +{ + gimple_stmt_iterator i = gsi_last_bb (bb); + + if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) + gsi_prev_nondebug (&i); + + return i; +} + +/* Return true if I is followed only by debug statements in its + sequence. */ + +static inline bool +gsi_one_nondebug_before_end_p (gimple_stmt_iterator i) +{ + if (gsi_one_before_end_p (i)) + return true; + if (gsi_end_p (i)) + return false; + gsi_next_nondebug (&i); + return gsi_end_p (i); +} + +/* Advance I statement iterator to the next non-virtual GIMPLE_PHI + statement. 
*/ + +static inline void +gsi_next_nonvirtual_phi (gphi_iterator *i) +{ + do + { + gsi_next (i); + } + while (!gsi_end_p (*i) && virtual_operand_p (gimple_phi_result (i->phi ()))); +} + +/* Return a new iterator pointing to the first non-virtual phi statement in + basic block BB. */ + +static inline gphi_iterator +gsi_start_nonvirtual_phis (basic_block bb) +{ + gphi_iterator i = gsi_start_phis (bb); + + if (!gsi_end_p (i) && virtual_operand_p (gimple_phi_result (i.phi ()))) + gsi_next_nonvirtual_phi (&i); + + return i; +} + +/* Return the basic block associated with this iterator. */ + +static inline basic_block +gsi_bb (gimple_stmt_iterator i) +{ + return i.bb; +} + +/* Return the sequence associated with this iterator. */ + +static inline gimple_seq +gsi_seq (gimple_stmt_iterator i) +{ + return *i.seq; +} + +/* Determine whether SEQ is a nondebug singleton. */ + +static inline bool +gimple_seq_nondebug_singleton_p (gimple_seq seq) +{ + gimple_stmt_iterator gsi; + + /* Find a nondebug gimple. */ + gsi.ptr = gimple_seq_first (seq); + gsi.seq = &seq; + gsi.bb = NULL; + while (!gsi_end_p (gsi) + && is_gimple_debug (gsi_stmt (gsi))) + gsi_next (&gsi); + + /* No nondebug gimple found, not a singleton. */ + if (gsi_end_p (gsi)) + return false; + + /* Find a next nondebug gimple. */ + gsi_next (&gsi); + while (!gsi_end_p (gsi) + && is_gimple_debug (gsi_stmt (gsi))) + gsi_next (&gsi); + + /* Only a singleton if there's no next nondebug gimple. */ + return gsi_end_p (gsi); +} + +#endif /* GCC_GIMPLE_ITERATOR_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-low.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-low.h new file mode 100644 index 0000000..08d6878 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-low.h @@ -0,0 +1,28 @@ +/* Header file for gimple lowering pass. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_LOW_H +#define GCC_GIMPLE_LOW_H + +extern bool gimple_stmt_may_fallthru (gimple *); +extern bool gimple_seq_may_fallthru (gimple_seq); +extern void record_vars_into (tree, tree); +extern void record_vars (tree); + +#endif /* GCC_GIMPLE_LOW_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-match.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-match.h new file mode 100644 index 0000000..d7b0b67 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-match.h @@ -0,0 +1,413 @@ +/* Gimple simplify definitions. + + Copyright (C) 2011-2022 Free Software Foundation, Inc. + Contributed by Richard Guenther + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_GIMPLE_MATCH_H +#define GCC_GIMPLE_MATCH_H + + +/* Helper to transparently allow tree codes and builtin function codes + exist in one storage entity. */ +class code_helper +{ +public: + code_helper () {} + code_helper (tree_code code) : rep ((int) code) {} + code_helper (combined_fn fn) : rep (-(int) fn) {} + code_helper (internal_fn fn) : rep (-(int) as_combined_fn (fn)) {} + explicit operator tree_code () const { return (tree_code) rep; } + explicit operator combined_fn () const { return (combined_fn) -rep; } + explicit operator internal_fn () const; + explicit operator built_in_function () const; + bool is_tree_code () const { return rep > 0; } + bool is_fn_code () const { return rep < 0; } + bool is_internal_fn () const; + bool is_builtin_fn () const; + int get_rep () const { return rep; } + bool operator== (const code_helper &other) { return rep == other.rep; } + bool operator!= (const code_helper &other) { return rep != other.rep; } + bool operator== (tree_code c) { return rep == code_helper (c).rep; } + bool operator!= (tree_code c) { return rep != code_helper (c).rep; } + +private: + int rep; +}; + +inline code_helper::operator internal_fn () const +{ + return as_internal_fn (combined_fn (*this)); +} + +inline code_helper::operator built_in_function () const +{ + return as_builtin_fn (combined_fn (*this)); +} + +inline bool +code_helper::is_internal_fn () const +{ + return is_fn_code () && internal_fn_p (combined_fn (*this)); +} + +inline bool +code_helper::is_builtin_fn () const +{ + return is_fn_code () && builtin_fn_p (combined_fn (*this)); +} + +/* Represents the condition under which an operation should happen, + and the value to use otherwise. The condition applies elementwise + (as for VEC_COND_EXPR) if the values are vectors. */ +class gimple_match_cond +{ +public: + enum uncond { UNCOND }; + + /* Build an unconditional op. 
*/ + gimple_match_cond (uncond) : cond (NULL_TREE), else_value (NULL_TREE) {} + gimple_match_cond (tree, tree); + + gimple_match_cond any_else () const; + + /* The condition under which the operation occurs, or NULL_TREE + if the operation is unconditional. */ + tree cond; + + /* The value to use when the condition is false. This is NULL_TREE if + the operation is unconditional or if the value doesn't matter. */ + tree else_value; +}; + +inline +gimple_match_cond::gimple_match_cond (tree cond_in, tree else_value_in) + : cond (cond_in), else_value (else_value_in) +{ +} + +/* Return a gimple_match_cond with the same condition but with an + arbitrary ELSE_VALUE. */ + +inline gimple_match_cond +gimple_match_cond::any_else () const +{ + return gimple_match_cond (cond, NULL_TREE); +} + +/* Represents an operation to be simplified, or the result of the + simplification. */ +class gimple_match_op +{ +public: + gimple_match_op (); + gimple_match_op (const gimple_match_cond &, code_helper, tree, unsigned int); + gimple_match_op (const gimple_match_cond &, + code_helper, tree, tree); + gimple_match_op (const gimple_match_cond &, + code_helper, tree, tree, tree); + gimple_match_op (const gimple_match_cond &, + code_helper, tree, tree, tree, tree); + gimple_match_op (const gimple_match_cond &, + code_helper, tree, tree, tree, tree, tree); + gimple_match_op (const gimple_match_cond &, + code_helper, tree, tree, tree, tree, tree, tree); + + void set_op (code_helper, tree, unsigned int); + void set_op (code_helper, tree, tree); + void set_op (code_helper, tree, tree, tree); + void set_op (code_helper, tree, tree, tree, tree); + void set_op (code_helper, tree, tree, tree, tree, bool); + void set_op (code_helper, tree, tree, tree, tree, tree); + void set_op (code_helper, tree, tree, tree, tree, tree, tree); + void set_value (tree); + + tree op_or_null (unsigned int) const; + + bool resimplify (gimple_seq *, tree (*)(tree)); + + /* The maximum value of NUM_OPS. 
*/ + static const unsigned int MAX_NUM_OPS = 5; + + /* The conditions under which the operation is performed, and the value to + use as a fallback. */ + gimple_match_cond cond; + + /* The operation being performed. */ + code_helper code; + + /* The type of the result. */ + tree type; + + /* For a BIT_FIELD_REF, whether the group of bits is stored in reverse order + from the target order. */ + bool reverse; + + /* The number of operands to CODE. */ + unsigned int num_ops; + + /* The operands to CODE. Only the first NUM_OPS entries are meaningful. */ + tree ops[MAX_NUM_OPS]; +}; + +inline +gimple_match_op::gimple_match_op () + : cond (gimple_match_cond::UNCOND), type (NULL_TREE), reverse (false), + num_ops (0) +{ +} + +/* Constructor that takes the condition, code, type and number of + operands, but leaves the caller to fill in the operands. */ + +inline +gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in, + code_helper code_in, tree type_in, + unsigned int num_ops_in) + : cond (cond_in), code (code_in), type (type_in), reverse (false), + num_ops (num_ops_in) +{ +} + +/* Constructors for various numbers of operands. 
*/ + +inline +gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in, + code_helper code_in, tree type_in, + tree op0) + : cond (cond_in), code (code_in), type (type_in), reverse (false), + num_ops (1) +{ + ops[0] = op0; +} + +inline +gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in, + code_helper code_in, tree type_in, + tree op0, tree op1) + : cond (cond_in), code (code_in), type (type_in), reverse (false), + num_ops (2) +{ + ops[0] = op0; + ops[1] = op1; +} + +inline +gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in, + code_helper code_in, tree type_in, + tree op0, tree op1, tree op2) + : cond (cond_in), code (code_in), type (type_in), reverse (false), + num_ops (3) +{ + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; +} + +inline +gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in, + code_helper code_in, tree type_in, + tree op0, tree op1, tree op2, tree op3) + : cond (cond_in), code (code_in), type (type_in), reverse (false), + num_ops (4) +{ + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; + ops[3] = op3; +} + +inline +gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in, + code_helper code_in, tree type_in, + tree op0, tree op1, tree op2, tree op3, + tree op4) + : cond (cond_in), code (code_in), type (type_in), reverse (false), + num_ops (5) +{ + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; + ops[3] = op3; + ops[4] = op4; +} + +/* Change the operation performed to CODE_IN, the type of the result to + TYPE_IN, and the number of operands to NUM_OPS_IN. The caller needs + to set the operands itself. */ + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, + unsigned int num_ops_in) +{ + code = code_in; + type = type_in; + num_ops = num_ops_in; +} + +/* Functions for changing the operation performed, for various numbers + of operands. 
*/ + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, tree op0) +{ + code = code_in; + type = type_in; + num_ops = 1; + ops[0] = op0; +} + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, tree op0, tree op1) +{ + code = code_in; + type = type_in; + num_ops = 2; + ops[0] = op0; + ops[1] = op1; +} + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, + tree op0, tree op1, tree op2) +{ + code = code_in; + type = type_in; + num_ops = 3; + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; +} + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, + tree op0, tree op1, tree op2, bool reverse_in) +{ + code = code_in; + type = type_in; + reverse = reverse_in; + num_ops = 3; + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; +} + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, + tree op0, tree op1, tree op2, tree op3) +{ + code = code_in; + type = type_in; + num_ops = 4; + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; + ops[3] = op3; +} + +inline void +gimple_match_op::set_op (code_helper code_in, tree type_in, + tree op0, tree op1, tree op2, tree op3, tree op4) +{ + code = code_in; + type = type_in; + num_ops = 5; + ops[0] = op0; + ops[1] = op1; + ops[2] = op2; + ops[3] = op3; + ops[4] = op4; +} + +/* Set the "operation" to be the single value VALUE, such as a constant + or SSA_NAME. */ + +inline void +gimple_match_op::set_value (tree value) +{ + set_op (TREE_CODE (value), TREE_TYPE (value), value); +} + +/* Return the value of operand I, or null if there aren't that many + operands. */ + +inline tree +gimple_match_op::op_or_null (unsigned int i) const +{ + return i < num_ops ? ops[i] : NULL_TREE; +} + +/* Return whether OP is a non-expression result and a gimple value. 
*/ + +inline bool +gimple_simplified_result_is_gimple_val (const gimple_match_op *op) +{ + return (op->code.is_tree_code () + && (TREE_CODE_LENGTH ((tree_code) op->code) == 0 + || ((tree_code) op->code) == ADDR_EXPR) + && is_gimple_val (op->ops[0])); +} + +extern tree (*mprts_hook) (gimple_match_op *); + +bool gimple_extract_op (gimple *, gimple_match_op *); +bool gimple_simplify (gimple *, gimple_match_op *, gimple_seq *, + tree (*)(tree), tree (*)(tree)); +tree maybe_push_res_to_seq (gimple_match_op *, gimple_seq *, + tree res = NULL_TREE); +void maybe_build_generic_op (gimple_match_op *); + +bool commutative_binary_op_p (code_helper, tree); +bool commutative_ternary_op_p (code_helper, tree); +int first_commutative_argument (code_helper, tree); +bool associative_binary_op_p (code_helper, tree); +code_helper canonicalize_code (code_helper, tree); + +#ifdef GCC_OPTABS_TREE_H +bool directly_supported_p (code_helper, tree, optab_subtype = optab_default); +#endif + +internal_fn get_conditional_internal_fn (code_helper, tree); + +extern tree gimple_build (gimple_seq *, location_t, + code_helper, tree, tree); +inline tree +gimple_build (gimple_seq *seq, code_helper code, tree type, tree op0) +{ + return gimple_build (seq, UNKNOWN_LOCATION, code, type, op0); +} + +extern tree gimple_build (gimple_seq *, location_t, + code_helper, tree, tree, tree); +inline tree +gimple_build (gimple_seq *seq, code_helper code, tree type, tree op0, + tree op1) +{ + return gimple_build (seq, UNKNOWN_LOCATION, code, type, op0, op1); +} + +extern tree gimple_build (gimple_seq *, location_t, + code_helper, tree, tree, tree, tree); +inline tree +gimple_build (gimple_seq *seq, code_helper code, tree type, tree op0, + tree op1, tree op2) +{ + return gimple_build (seq, UNKNOWN_LOCATION, code, type, op0, op1, op2); +} + +#endif /* GCC_GIMPLE_MATCH_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-predicate-analysis.h 
b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-predicate-analysis.h new file mode 100644 index 0000000..c1843e8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-predicate-analysis.h @@ -0,0 +1,158 @@ +/* Support for simple predicate analysis. + + Copyright (C) 2021-2022 Free Software Foundation, Inc. + Contributed by Martin Sebor + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + GCC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#ifndef GIMPLE_PREDICATE_ANALYSIS_H_INCLUDED +#define GIMPLE_PREDICATE_ANALYSIS_H_INCLUDED + +#define MAX_NUM_CHAINS 8 +#define MAX_CHAIN_LEN 5 +#define MAX_POSTDOM_CHECK 8 +#define MAX_SWITCH_CASES 40 + +/* Represents a simple Boolean predicate. */ +struct pred_info +{ + tree pred_lhs; + tree pred_rhs; + enum tree_code cond_code; + bool invert; +}; + +/* The type to represent a sequence of predicates grouped + with .AND. operation. */ +typedef vec pred_chain; + +/* The type to represent a sequence of pred_chains grouped + with .OR. operation. */ +typedef vec pred_chain_union; + +/* Represents a complex Boolean predicate expression. */ +class predicate +{ + public: + /* Base function object type used to determine whether an expression + is of interest. */ + struct func_t + { + typedef unsigned phi_arg_set_t; + + /* Return true if the argument is an expression of interest. */ + virtual bool operator()(tree) = 0; + /* Return a bitset of PHI arguments of interest. 
By default returns + bitset with a bit set for each argument. Should be called in + the overriden function first and, if nonzero, the result then + refined as appropriate. */ + virtual phi_arg_set_t phi_arg_set (gphi *); + + /* Maximum number of PHI arguments supported by phi_arg_set(). */ + static constexpr unsigned max_phi_args = + sizeof (phi_arg_set_t) * CHAR_BIT; + }; + + /* Construct with the specified EVAL object. */ + predicate (func_t &eval) + : m_preds (vNULL), m_eval (eval), m_use_expr () { } + + /* Copy. */ + predicate (const predicate &rhs) + : m_preds (vNULL), m_eval (rhs.m_eval), m_use_expr () + { + *this = rhs; + } + + predicate (basic_block, basic_block, func_t &); + + ~predicate (); + + /* Assign. */ + predicate& operator= (const predicate &); + + bool is_empty () const + { + return m_preds.is_empty (); + } + + const pred_chain_union chain () const + { + return m_preds; + } + + /* Return true if the use by a statement in the basic block of + a PHI operand is ruled out (i.e., guarded) by *THIS. */ + bool is_use_guarded (gimple *, basic_block, gphi *, unsigned); + + void init_from_control_deps (const vec *, unsigned); + + void dump (gimple *, const char *) const; + + void normalize (gimple * = NULL, bool = false); + void simplify (gimple * = NULL, bool = false); + + bool is_use_guarded (gimple *, basic_block, gphi *, unsigned, + hash_set *); + + /* Return the predicate expression guarding the definition of + the interesting variable, optionally inverted. */ + tree def_expr (bool = false) const; + /* Return the predicate expression guarding the use of the interesting + variable. */ + tree use_expr () const; + + tree expr (bool = false) const; + +private: + bool includes (const pred_chain &) const; + bool superset_of (const predicate &) const; + bool overlap (gphi *, unsigned, hash_set *); + bool use_cannot_happen (gphi *, unsigned); + + bool init_from_phi_def (gphi *); + + void push_pred (const pred_info &); + + /* Normalization functions. 
*/ + void normalize (pred_chain *, pred_info, tree_code, pred_chain *, + hash_set *); + + void normalize (const pred_info &); + void normalize (const pred_chain &); + + /* Simplification functions. */ + bool simplify_2 (); + bool simplify_3 (); + bool simplify_4 (); + +private: + /* Representation of the predicate expression(s). */ + pred_chain_union m_preds; + /* Callback to evaluate an operand. Return true if it's interesting. */ + func_t &m_eval; + /* The predicate expression guarding the use of the interesting + variable. */ + tree m_use_expr; +}; + +/* Bit mask handling macros. */ +#define MASK_SET_BIT(mask, pos) mask |= (1 << pos) +#define MASK_TEST_BIT(mask, pos) (mask & (1 << pos)) +#define MASK_EMPTY(mask) (mask == 0) + +#endif // GIMPLE_PREDICATE_ANALYSIS_H_INCLUDED diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-predict.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-predict.h new file mode 100644 index 0000000..0e37dd8 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-predict.h @@ -0,0 +1,91 @@ +/* Gimple prediction routines. + + Copyright (C) 2007-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_PREDICT_H +#define GCC_GIMPLE_PREDICT_H + +#include "predict.h" + +/* Return the predictor of GIMPLE_PREDICT statement GS. 
*/ + +static inline enum br_predictor +gimple_predict_predictor (const gimple *gs) +{ + GIMPLE_CHECK (gs, GIMPLE_PREDICT); + return (enum br_predictor) (gs->subcode & ~GF_PREDICT_TAKEN); +} + + +/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */ + +static inline void +gimple_predict_set_predictor (gimple *gs, enum br_predictor predictor) +{ + GIMPLE_CHECK (gs, GIMPLE_PREDICT); + gs->subcode = (gs->subcode & GF_PREDICT_TAKEN) + | (unsigned) predictor; +} + + +/* Return the outcome of GIMPLE_PREDICT statement GS. */ + +static inline enum prediction +gimple_predict_outcome (const gimple *gs) +{ + GIMPLE_CHECK (gs, GIMPLE_PREDICT); + return (gs->subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN; +} + + +/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */ + +static inline void +gimple_predict_set_outcome (gimple *gs, enum prediction outcome) +{ + GIMPLE_CHECK (gs, GIMPLE_PREDICT); + if (outcome == TAKEN) + gs->subcode |= GF_PREDICT_TAKEN; + else + gs->subcode &= ~GF_PREDICT_TAKEN; +} + +/* Build a GIMPLE_PREDICT statement. PREDICT is one of the predictors from + predict.def, OUTCOME is NOT_TAKEN or TAKEN. */ + +inline gimple * +gimple_build_predict (enum br_predictor predictor, enum prediction outcome) +{ + gimple *p = gimple_alloc (GIMPLE_PREDICT, 0); + /* Ensure all the predictors fit into the lower bits of the subcode. */ + gcc_assert ((int) END_PREDICTORS <= GF_PREDICT_TAKEN); + gimple_predict_set_predictor (p, predictor); + gimple_predict_set_outcome (p, outcome); + return p; +} + +/* Return true if GS is a GIMPLE_PREDICT statement. 
*/ + +static inline bool +is_gimple_predict (const gimple *gs) +{ + return gimple_code (gs) == GIMPLE_PREDICT; +} + +#endif /* GCC_GIMPLE_PREDICT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-pretty-print.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-pretty-print.h new file mode 100644 index 0000000..2adac2a --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-pretty-print.h @@ -0,0 +1,41 @@ +/* Various declarations for pretty formatting of GIMPLE statements and + expressions. + Copyright (C) 2000-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_GIMPLE_PRETTY_PRINT_H +#define GCC_GIMPLE_PRETTY_PRINT_H + +#include "tree-pretty-print.h" + +/* In gimple-pretty-print.cc */ +extern void debug_gimple_stmt (gimple *); +extern void debug_gimple_seq (gimple_seq); +extern void print_gimple_seq (FILE *, gimple_seq, int, dump_flags_t); +extern void print_gimple_stmt (FILE *, gimple *, int, dump_flags_t = TDF_NONE); +extern void debug (gimple &ref); +extern void debug (gimple *ptr); +extern void print_gimple_expr (FILE *, gimple *, int, dump_flags_t = TDF_NONE); +extern void pp_gimple_stmt_1 (pretty_printer *, const gimple *, int, + dump_flags_t); +extern void gimple_dump_bb (FILE *, basic_block, int, dump_flags_t); +extern void gimple_dump_bb_for_graph (pretty_printer *, basic_block); +extern void dump_ssaname_info_to_file (FILE *, tree, int); +extern void percent_G_format (text_info *); + +#endif /* ! GCC_GIMPLE_PRETTY_PRINT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-cache.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-cache.h new file mode 100644 index 0000000..589b649 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-cache.h @@ -0,0 +1,160 @@ +/* Header file for gimple ranger SSA cache. + Copyright (C) 2017-2022 Free Software Foundation, Inc. + Contributed by Andrew MacLeod . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_SSA_RANGE_CACHE_H +#define GCC_SSA_RANGE_CACHE_H + +#include "gimple-range-gori.h" + +// Class used to track non-null references of an SSA name. A vector +// of bitmaps indexed by SSA name is maintained. When indexed by +// basic block, an on-bit indicates there is a non-null dereference +// for that SSA in that block. + +class non_null_ref +{ +public: + non_null_ref (); + ~non_null_ref (); + bool non_null_deref_p (tree name, basic_block bb, bool search_dom = true); + bool adjust_range (irange &r, tree name, basic_block bb, + bool search_dom = true); + bool set_nonnull (basic_block bb, tree name); +private: + vec m_nn; + void process_name (tree name); + bitmap_obstack m_bitmaps; +}; + +// If NAME has a non-null dereference in block BB, adjust R with the +// non-zero information from non_null_deref_p, and return TRUE. If +// SEARCH_DOM is true, non_null_deref_p should search the dominator tree. + +inline bool +non_null_ref::adjust_range (irange &r, tree name, basic_block bb, + bool search_dom) +{ + // Non-call exceptions mean we could throw in the middle of the + // block, so just punt on those for now. + if (cfun->can_throw_non_call_exceptions) + return false; + // We only care about the null / non-null property of pointers. + if (!POINTER_TYPE_P (TREE_TYPE (name))) + return false; + if (r.undefined_p () || r.lower_bound () != 0 || r.upper_bound () == 0) + return false; + // Check if pointers have any non-null dereferences. + if (non_null_deref_p (name, bb, search_dom)) + { + // Remove zero from the range. + unsigned prec = TYPE_PRECISION (TREE_TYPE (name)); + r.intersect (wi::one (prec), wi::max_value (prec, UNSIGNED)); + return true; + } + return false; +} + +// This class manages a vector of pointers to ssa_block ranges. It +// provides the basis for the "range on entry" cache for all +// SSA names. 
+ +class block_range_cache +{ +public: + block_range_cache (); + ~block_range_cache (); + + bool set_bb_range (tree name, const_basic_block bb, const irange &r); + bool get_bb_range (irange &r, tree name, const_basic_block bb); + bool bb_range_p (tree name, const_basic_block bb); + + void dump (FILE *f); + void dump (FILE *f, basic_block bb, bool print_varying = true); +private: + vec m_ssa_ranges; + ssa_block_ranges &get_block_ranges (tree name); + ssa_block_ranges *query_block_ranges (tree name); + irange_allocator *m_irange_allocator; + bitmap_obstack m_bitmaps; +}; + +// This global cache is used with the range engine as markers for what +// has been visited during this incarnation. Once the ranger evaluates +// a name, it is typically not re-evaluated again. + +class ssa_global_cache +{ +public: + ssa_global_cache (); + ~ssa_global_cache (); + bool get_global_range (irange &r, tree name) const; + bool set_global_range (tree name, const irange &r); + void clear_global_range (tree name); + void clear (); + void dump (FILE *f = stderr); +private: + vec m_tab; + class irange_allocator *m_irange_allocator; +}; + +// This class provides all the caches a global ranger may need, and makes +// them available for gori-computes to query so outgoing edges can be +// properly calculated. 
+ +class ranger_cache : public range_query +{ +public: + ranger_cache (int not_executable_flag); + ~ranger_cache (); + + virtual bool range_of_expr (irange &r, tree name, gimple *stmt); + virtual bool range_on_edge (irange &r, edge e, tree expr); + bool block_range (irange &r, basic_block bb, tree name, bool calc = true); + bool range_from_dom (irange &r, tree name, basic_block bb); + + bool get_global_range (irange &r, tree name) const; + bool get_global_range (irange &r, tree name, bool ¤t_p); + void set_global_range (tree name, const irange &r); + + void propagate_updated_value (tree name, basic_block bb); + + void block_apply_nonnull (gimple *s); + void update_to_nonnull (basic_block bb, tree name); + non_null_ref m_non_null; + gori_compute m_gori; + + void dump_bb (FILE *f, basic_block bb); + virtual void dump (FILE *f) OVERRIDE; +private: + ssa_global_cache m_globals; + block_range_cache m_on_entry; + class temporal_cache *m_temporal; + void fill_block_cache (tree name, basic_block bb, basic_block def_bb); + void propagate_cache (tree name); + + void range_of_def (irange &r, tree name, basic_block bb = NULL); + void entry_range (irange &r, tree expr, basic_block bb); + void exit_range (irange &r, tree expr, basic_block bb); + + vec m_workback; + class update_list *m_update; +}; + +#endif // GCC_SSA_RANGE_CACHE_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-edge.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-edge.h new file mode 100644 index 0000000..c131b33 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-edge.h @@ -0,0 +1,58 @@ +/* Gimple range edge header file. + Copyright (C) 2020-2022 Free Software Foundation, Inc. + Contributed by Andrew MacLeod + and Aldy Hernandez . + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GIMPLE_RANGE_EDGE_H +#define GIMPLE_RANGE_EDGE_H + +// This class is used to query ranges on constant edges in GIMPLE. +// +// For a COND_EXPR, the TRUE edge will return [1,1] and the false edge a [0,0]. +// +// For SWITCH_EXPR, it is awkward to calculate ranges. When a request +// is made, the entire switch is evalauted and the results cached. +// Any future requests to that switch will use the cached value, providing +// dramatic decrease in computation time. +// +// The API is simple, just ask for the range on the edge. +// The return value is NULL for no range, or the branch statement which the +// edge gets the range from, along with the range. + +class gimple_outgoing_range +{ +public: + gimple_outgoing_range (int max_sw_edges = INT_MAX); + ~gimple_outgoing_range (); + gimple *edge_range_p (irange &r, edge e); +private: + void calc_switch_ranges (gswitch *sw); + bool get_edge_range (irange &r, gimple *s, edge e); + + int m_max_edges; + hash_map *m_edge_table; + irange_allocator m_range_allocator; +}; + +// If there is a range control statement at the end of block BB, return it. +gimple *gimple_outgoing_range_stmt_p (basic_block bb); +// Return the range on edge E if it is from a GCOND. Either TRUE or FALSE. 
+void gcond_edge_range (irange &r, edge e); + +#endif // GIMPLE_RANGE_EDGE_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-fold.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-fold.h new file mode 100644 index 0000000..4fe8d06 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-fold.h @@ -0,0 +1,194 @@ +/* Header file for the GIMPLE fold_using_range interface. + Copyright (C) 2019-2022 Free Software Foundation, Inc. + Contributed by Andrew MacLeod + and Aldy Hernandez . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_RANGE_FOLD_H +#define GCC_GIMPLE_RANGE_FOLD_H + +// This file is the main include point for gimple range folding. +// These routines will fold stmt S into the result irange R. +// Any ssa_names on the stmt will be calculated using the range_query +// parameter via a call to range_of_expr. +// If no range_query is provided, current global range info will be used. +// The second variation specifies an edge, and stmt S is recalculated as if +// it appeared on that edge. + +// Fold stmt S into range R using range query Q. +bool fold_range (irange &r, gimple *s, range_query *q = NULL); +// Recalculate stmt S into R using range query Q as if it were on edge ON_EDGE. +bool fold_range (irange &r, gimple *s, edge on_edge, range_query *q = NULL); + +// These routines the operands to be specified when manually folding. 
+// Any excess queries will be drawn from the current range_query. +bool fold_range (irange &r, gimple *s, irange &r1); +bool fold_range (irange &r, gimple *s, irange &r1, irange &r2); +bool fold_range (irange &r, gimple *s, unsigned num_elements, irange *vector); + +// Return the range_operator pointer for this statement. This routine +// can also be used to gate whether a routine is range-ops enabled. + +static inline range_operator * +gimple_range_handler (const gimple *s) +{ + if (const gassign *ass = dyn_cast (s)) + return range_op_handler (gimple_assign_rhs_code (ass), + TREE_TYPE (gimple_assign_lhs (ass))); + if (const gcond *cond = dyn_cast (s)) + return range_op_handler (gimple_cond_code (cond), + TREE_TYPE (gimple_cond_lhs (cond))); + return NULL; +} + +// Return the type of range which statement S calculates. If the type is +// unsupported or no type can be determined, return NULL_TREE. + +static inline tree +gimple_range_type (const gimple *s) +{ + tree lhs = gimple_get_lhs (s); + tree type = NULL_TREE; + if (lhs) + type = TREE_TYPE (lhs); + else + { + enum gimple_code code = gimple_code (s); + if (code == GIMPLE_COND) + type = boolean_type_node; + else if (code == GIMPLE_PHI) + type = TREE_TYPE (gimple_phi_result (s)); + else if (code == GIMPLE_CALL) + { + type = gimple_call_fntype (s); + // If it has a type, get the return type. + if (type) + type = TREE_TYPE (type); + } + } + if (irange::supports_type_p (type)) + return type; + return NULL_TREE; +} + +// Return EXP if it is an SSA_NAME with a type supported by gimple ranges. + +static inline tree +gimple_range_ssa_p (tree exp) +{ + if (exp && TREE_CODE (exp) == SSA_NAME && + !SSA_NAME_IS_VIRTUAL_OPERAND (exp) && + !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) && + irange::supports_type_p (TREE_TYPE (exp))) + return exp; + return NULL_TREE; +} + +// Return true if TYPE1 and TYPE2 are compatible range types. 
+ +static inline bool +range_compatible_p (tree type1, tree type2) +{ + // types_compatible_p requires conversion in both directions to be useless. + // GIMPLE only requires a cast one way in order to be compatible. + // Ranges really only need the sign and precision to be the same. + return (TYPE_PRECISION (type1) == TYPE_PRECISION (type2) + && TYPE_SIGN (type1) == TYPE_SIGN (type2)); +} + + +// Source of all operands for fold_using_range and gori_compute. +// It abstracts out the source of an operand so it can come from a stmt or +// and edge or anywhere a derived class of fur_source wants. +// The default simply picks up ranges from the current range_query. + +class fur_source +{ +public: + fur_source (range_query *q = NULL); + inline range_query *query () { return m_query; } + inline class gori_compute *gori () { return m_gori; }; + virtual bool get_operand (irange &r, tree expr); + virtual bool get_phi_operand (irange &r, tree expr, edge e); + virtual relation_kind query_relation (tree op1, tree op2); + virtual void register_relation (gimple *stmt, relation_kind k, tree op1, + tree op2); + virtual void register_relation (edge e, relation_kind k, tree op1, + tree op2); + void register_outgoing_edges (gcond *, irange &lhs_range, edge e0, edge e1); +protected: + range_query *m_query; + gori_compute *m_gori; +}; + +// fur_stmt is the specification for drawing an operand from range_query Q +// via a range_of_Expr call on stmt S. + +class fur_stmt : public fur_source +{ +public: + fur_stmt (gimple *s, range_query *q = NULL); + virtual bool get_operand (irange &r, tree expr) OVERRIDE; + virtual bool get_phi_operand (irange &r, tree expr, edge e) OVERRIDE; + virtual relation_kind query_relation (tree op1, tree op2) OVERRIDE; +private: + gimple *m_stmt; +}; + +// This version of fur_source will pick a range from a stmt, and also register +// dependencies via a gori_compute object. This is mostly an internal API. 
+ +class fur_depend : public fur_stmt +{ +public: + fur_depend (gimple *s, gori_compute *gori, range_query *q = NULL); + virtual void register_relation (gimple *stmt, relation_kind k, tree op1, + tree op2) OVERRIDE; + virtual void register_relation (edge e, relation_kind k, tree op1, + tree op2) OVERRIDE; +protected: + relation_oracle *m_oracle; +}; + +extern tree gimple_range_operand1 (const gimple *s); +extern tree gimple_range_operand2 (const gimple *s); + +// This class uses ranges to fold a gimple statement producinf a range for +// the LHS. The source of all operands is supplied via the fur_source class +// which provides a range_query as well as a source location and any other +// required information. + +class fold_using_range +{ +public: + bool fold_stmt (irange &r, gimple *s, class fur_source &src, + tree name = NULL_TREE); +protected: + bool range_of_range_op (irange &r, gimple *s, fur_source &src); + bool range_of_call (irange &r, gcall *call, fur_source &src); + bool range_of_cond_expr (irange &r, gassign* cond, fur_source &src); + bool range_of_address (irange &r, gimple *s, fur_source &src); + bool range_of_builtin_call (irange &r, gcall *call, fur_source &src); + void range_of_builtin_ubsan_call (irange &r, gcall *call, tree_code code, + fur_source &src); + bool range_of_phi (irange &r, gphi *phi, fur_source &src); + void range_of_ssa_name_with_loop_info (irange &, tree, class loop *, gphi *, + fur_source &src); + void relation_fold_and_or (irange& lhs_range, gimple *s, fur_source &src); +}; +#endif // GCC_GIMPLE_RANGE_FOLD_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-gori.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-gori.h new file mode 100644 index 0000000..605884e --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-gori.h @@ -0,0 +1,229 @@ +/* Header file for gimple range GORI structures. + Copyright (C) 2017-2022 Free Software Foundation, Inc. 
+ Contributed by Andrew MacLeod + and Aldy Hernandez . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_RANGE_GORI_H +#define GCC_GIMPLE_RANGE_GORI_H + +// RANGE_DEF_CHAIN is used to determine which SSA names in a block can +// have range information calculated for them, and what the +// dependencies on each other are. + +class range_def_chain +{ +public: + range_def_chain (); + ~range_def_chain (); + tree depend1 (tree name) const; + tree depend2 (tree name) const; + bool in_chain_p (tree name, tree def); + bool chain_import_p (tree name, tree import); + void register_dependency (tree name, tree ssa1, basic_block bb = NULL); + void dump (FILE *f, basic_block bb, const char *prefix = NULL); +protected: + bool has_def_chain (tree name); + bool def_chain_in_bitmap_p (tree name, bitmap b); + void add_def_chain_to_bitmap (bitmap b, tree name); + bitmap get_def_chain (tree name); + bitmap get_imports (tree name); + bitmap_obstack m_bitmaps; +private: + struct rdc { + tree ssa1; // First direct dependency + tree ssa2; // Second direct dependency + bitmap bm; // All dependencies + bitmap m_import; + }; + vec m_def_chain; // SSA_NAME : def chain components. + void set_import (struct rdc &data, tree imp, bitmap b); + int m_logical_depth; +}; + +// Return the first direct dependency for NAME, if there is one. +// Direct dependencies are those which occur on the defintion statement. 
+// Only the first 2 such names are cached. + +inline tree +range_def_chain::depend1 (tree name) const +{ + unsigned v = SSA_NAME_VERSION (name); + if (v >= m_def_chain.length ()) + return NULL_TREE; + return m_def_chain[v].ssa1; +} + +// Return the second direct dependency for NAME, if there is one. + +inline tree +range_def_chain::depend2 (tree name) const +{ + unsigned v = SSA_NAME_VERSION (name); + if (v >= m_def_chain.length ()) + return NULL_TREE; + return m_def_chain[v].ssa2; +} + +// GORI_MAP is used to accumulate what SSA names in a block can +// generate range information, and provides tools for the block ranger +// to enable it to efficiently calculate these ranges. + +class gori_map : public range_def_chain +{ +public: + gori_map (); + ~gori_map (); + + bool is_export_p (tree name, basic_block bb = NULL); + bool is_import_p (tree name, basic_block bb); + bitmap exports (basic_block bb); + bitmap imports (basic_block bb); + void set_range_invariant (tree name); + + void dump (FILE *f); + void dump (FILE *f, basic_block bb, bool verbose = true); +private: + vec m_outgoing; // BB: Outgoing ranges calculatable on edges + vec m_incoming; // BB: Incoming ranges which can affect exports. + bitmap m_maybe_variant; // Names which might have outgoing ranges. + void maybe_add_gori (tree name, basic_block bb); + void calculate_gori (basic_block bb); +}; + + +// This class is used to determine which SSA_NAMES can have ranges +// calculated for them on outgoing edges from basic blocks. This represents +// ONLY the effect of the basic block edge->src on a range. +// +// There are 2 primary entry points: +// +// has_edge_range_p (tree name, edge e) +// returns true if the outgoing edge *may* be able to produce range +// information for ssa_name NAME on edge E. +// FALSE is returned if this edge does not affect the range of NAME. +// if no edge is specified, return TRUE if name may have a value calculated +// on *ANY* edge that has been seen. 
FALSE indicates that the global value +// is applicable everywhere that has been processed. +// +// outgoing_edge_range_p (irange &range, edge e, tree name) +// Actually does the calculation of RANGE for name on E +// This represents application of whatever static range effect edge E +// may have on NAME, not any cumulative effect. + +// There are also some internal APIs +// +// ssa_range_in_bb () is an internal routine which is used to start any +// calculation chain using SSA_NAMES which come from outside the block. ie +// a_2 = b_4 - 8 +// if (a_2 < 30) +// on the true edge, a_2 is known to be [0, 29] +// b_4 can be calculated as [8, 37] +// during this calculation, b_4 is considered an "import" and ssa_range_in_bb +// is queried for a starting range which is used in the calculation. +// A default value of VARYING provides the raw static info for the edge. +// +// If there is any known range for b_4 coming into this block, it can refine +// the results. This allows for cascading results to be propogated. +// if b_4 is [100, 200] on entry to the block, feeds into the calculation +// of a_2 = [92, 192], and finally on the true edge the range would be +// an empty range [] because it is not possible for the true edge to be taken. +// +// expr_range_in_bb is simply a wrapper which calls ssa_range_in_bb for +// SSA_NAMES and otherwise simply calculates the range of the expression. +// +// The constructor takes a flag value to use on edges to check for the +// NON_EXECUTABLE_EDGE property. The zero default means no flag is checked. +// All value requests from NON_EXECUTABLE_EDGE edges are returned UNDEFINED. +// +// The remaining routines are internal use only. 
+ +class gori_compute : public gori_map +{ +public: + gori_compute (int not_executable_flag = 0); + bool outgoing_edge_range_p (irange &r, edge e, tree name, range_query &q); + bool condexpr_adjust (irange &r1, irange &r2, gimple *s, tree cond, tree op1, + tree op2, fur_source &src); + bool has_edge_range_p (tree name, basic_block bb = NULL); + bool has_edge_range_p (tree name, edge e); + void dump (FILE *f); +private: + bool may_recompute_p (tree name, edge e); + bool may_recompute_p (tree name, basic_block bb = NULL); + bool compute_operand_range (irange &r, gimple *stmt, const irange &lhs, + tree name, class fur_source &src); + bool compute_operand_range_switch (irange &r, gswitch *s, const irange &lhs, + tree name, fur_source &src); + bool compute_operand1_range (irange &r, gimple *stmt, const irange &lhs, + tree name, fur_source &src); + bool compute_operand2_range (irange &r, gimple *stmt, const irange &lhs, + tree name, fur_source &src); + bool compute_operand1_and_operand2_range (irange &r, gimple *stmt, + const irange &lhs, tree name, + fur_source &src); + void compute_logical_operands (irange &true_range, irange &false_range, + gimple *stmt, const irange &lhs, + tree name, fur_source &src, tree op, + bool op_in_chain); + bool logical_combine (irange &r, enum tree_code code, const irange &lhs, + const irange &op1_true, const irange &op1_false, + const irange &op2_true, const irange &op2_false); + int_range<2> m_bool_zero; // Boolean false cached. + int_range<2> m_bool_one; // Boolean true cached. + + gimple_outgoing_range outgoing; // Edge values for COND_EXPR & SWITCH_EXPR. + range_tracer tracer; + int m_not_executable_flag; +}; + +// These routines provide a GIMPLE interface to the range-ops code. 
+extern bool gimple_range_calc_op1 (irange &r, const gimple *s, + const irange &lhs_range); +extern bool gimple_range_calc_op1 (irange &r, const gimple *s, + const irange &lhs_range, + const irange &op2_range); +extern bool gimple_range_calc_op2 (irange &r, const gimple *s, + const irange &lhs_range, + const irange &op1_range); + +// For each name that is an import into BB's exports.. +#define FOR_EACH_GORI_IMPORT_NAME(gori, bb, name) \ + for (gori_export_iterator iter ((gori).imports ((bb))); \ + ((name) = iter.get_name ()); \ + iter.next ()) + +// For each name possibly exported from block BB. +#define FOR_EACH_GORI_EXPORT_NAME(gori, bb, name) \ + for (gori_export_iterator iter ((gori).exports ((bb))); \ + ((name) = iter.get_name ()); \ + iter.next ()) + +// Used to assist with iterating over the GORI export list in various ways +class gori_export_iterator { +public: + gori_export_iterator (bitmap b); + void next (); + tree get_name (); +protected: + bitmap bm; + bitmap_iterator bi; + unsigned y; +}; + +#endif // GCC_GIMPLE_RANGE_GORI_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-path.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-path.h new file mode 100644 index 0000000..1820626 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-path.h @@ -0,0 +1,110 @@ +/* Header file for jump threading path solver. + Copyright (C) 2021-2022 Free Software Foundation, Inc. + Contributed by Aldy Hernandez . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_TREE_SSA_THREADSOLVER_H +#define GCC_TREE_SSA_THREADSOLVER_H + +// This class is a basic block path solver. Given a set of BBs +// indicating a path through the CFG, range_of_expr and range_of_stmt +// will calculate the range of an SSA or STMT as if the BBs in the +// path would have been executed in order. +// +// Note that the blocks are in reverse order, thus the exit block is +// path[0]. + +class path_range_query : public range_query +{ +public: + path_range_query (bool resolve = true, class gimple_ranger *ranger = NULL); + virtual ~path_range_query (); + void compute_ranges (const vec &, + const bitmap_head *imports = NULL); + void compute_ranges (edge e); + void compute_imports (bitmap imports, basic_block exit); + bool range_of_expr (irange &r, tree name, gimple * = NULL) override; + bool range_of_stmt (irange &r, gimple *, tree name = NULL) override; + bool unreachable_path_p (); + void dump (FILE *) override; + void debug (); + +private: + bool internal_range_of_expr (irange &r, tree name, gimple *); + bool defined_outside_path (tree name); + void range_on_path_entry (irange &r, tree name); + path_oracle *get_path_oracle () { return (path_oracle *)m_oracle; } + + // Cache manipulation. + void set_cache (const irange &r, tree name); + bool get_cache (irange &r, tree name); + void clear_cache (tree name); + + // Methods to compute ranges for the given path. 
+ bool range_defined_in_block (irange &, tree name, basic_block bb); + void compute_ranges_in_block (basic_block bb); + void compute_ranges_in_phis (basic_block bb); + void adjust_for_non_null_uses (basic_block bb); + void ssa_range_in_phi (irange &r, gphi *phi); + void compute_outgoing_relations (basic_block bb, basic_block next); + void compute_phi_relations (basic_block bb, basic_block prev); + void maybe_register_phi_relation (gphi *, edge e); + bool add_to_imports (tree name, bitmap imports); + bool import_p (tree name); + bool ssa_defined_in_bb (tree name, basic_block bb); + bool relations_may_be_invalidated (edge); + + // Path navigation. + void set_path (const vec &); + basic_block entry_bb () { return m_path[m_path.length () - 1]; } + basic_block exit_bb () { return m_path[0]; } + basic_block curr_bb () { return m_path[m_pos]; } + basic_block prev_bb () { return m_path[m_pos + 1]; } + basic_block next_bb () { return m_path[m_pos - 1]; } + bool at_entry () { return m_pos == m_path.length () - 1; } + bool at_exit () { return m_pos == 0; } + void move_next () { --m_pos; } + + // Range cache for SSA names. + ssa_global_cache *m_cache; + + // Set for each SSA that has an active entry in the cache. + bitmap m_has_cache_entry; + + // Path being analyzed. + auto_vec m_path; + + auto_bitmap m_imports; + gimple_ranger *m_ranger; + non_null_ref m_non_null; + + // Current path position. + unsigned m_pos; + + // Use ranger to resolve anything not known on entry. + bool m_resolve; + + // Set if there were any undefined expressions while pre-calculating path. + bool m_undefined_path; + + // True if m_ranger was allocated in this class and must be freed at + // destruction. 
+ bool m_alloced_ranger; +}; + +#endif // GCC_TREE_SSA_THREADSOLVER_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-trace.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-trace.h new file mode 100644 index 0000000..302afda --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range-trace.h @@ -0,0 +1,78 @@ +/* Header file for the GIMPLE range tracing/debugging facilties. + Copyright (C) 2021-2022 Free Software Foundation, Inc. + Contributed by Andrew MacLeod + and Aldy Hernandez . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_RANGE_TRACE_H +#define GCC_GIMPLE_RANGE_TRACE_H + +// This class manages range tracing for the ranger and gori components. +// Tracing will provide a unique integer index whenever a new trace +// is started. This can be used to identify where a calculation has gone wrong. 
+ +class range_tracer +{ +public: + range_tracer (const char *name = ""); + unsigned header (const char *str); + void trailer (unsigned counter, const char *caller, bool result, tree name, + const irange &r); + void print (unsigned counter, const char *str); + inline void enable_trace () { tracing = true; } + inline void disable_trace () { tracing = false; } + virtual void breakpoint (unsigned index); +private: + unsigned do_header (const char *str); + void print_prefix (unsigned idx, bool blanks); + static const unsigned bump = 2; + unsigned indent; + static const unsigned name_len = 100; + char component[name_len]; + bool tracing; +}; + + +// If tracing is enabled, start a new trace header, returning the trace index. +// Otherwise return 0. + +inline unsigned +range_tracer::header (const char *str) +{ + if (tracing) + return do_header (str); + return 0; +} + +// RAII class to change current dump_file and dump_flags, and restore +// when the object goes out of scope. + +class push_dump_file +{ +public: + push_dump_file (FILE *, dump_flags_t); + ~push_dump_file (); +private: + FILE *old_dump_file; + dump_flags_t old_dump_flags; +}; + +void dump_ranger (FILE *); +void dump_ranger (FILE *, const vec &path); + +#endif // GCC_GIMPLE_RANGE_TRACE_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range.h new file mode 100644 index 0000000..0733a53 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-range.h @@ -0,0 +1,80 @@ +/* Header file for the GIMPLE range interface. + Copyright (C) 2019-2022 Free Software Foundation, Inc. + Contributed by Andrew MacLeod + and Aldy Hernandez . + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_RANGE_H +#define GCC_GIMPLE_RANGE_H + +#include "range.h" +#include "value-query.h" +#include "range-op.h" +#include "gimple-range-trace.h" +#include "gimple-range-edge.h" +#include "gimple-range-fold.h" +#include "gimple-range-gori.h" +#include "gimple-range-cache.h" + +// This is the basic range generator interface. +// +// This base class provides all the API entry points, but only provides +// functionality at the statement level. Ie, it can calculate ranges on +// statements, but does no additonal lookup. +// +// All the range_of_* methods will return a range if the types is +// supported by the range engine. It may be the full range for the +// type, AKA varying_p or it may be a refined range. If the range +// type is not supported, then false is returned. Non-statement +// related methods return whatever the current global value is. 
+ +class gimple_ranger : public range_query +{ +public: + gimple_ranger (); + ~gimple_ranger (); + virtual bool range_of_stmt (irange &r, gimple *, tree name = NULL) OVERRIDE; + virtual bool range_of_expr (irange &r, tree name, gimple * = NULL) OVERRIDE; + virtual bool range_on_edge (irange &r, edge e, tree name) OVERRIDE; + void range_on_entry (irange &r, basic_block bb, tree name); + void range_on_exit (irange &r, basic_block bb, tree name); + void export_global_ranges (); + inline gori_compute &gori () { return m_cache.m_gori; } + virtual void dump (FILE *f) OVERRIDE; + void debug (); + void dump_bb (FILE *f, basic_block bb); + auto_edge_flag non_executable_edge_flag; + bool fold_stmt (gimple_stmt_iterator *gsi, tree (*) (tree)); + void register_side_effects (gimple *s); +protected: + bool fold_range_internal (irange &r, gimple *s, tree name); + void prefill_name (irange &r, tree name); + void prefill_stmt_dependencies (tree ssa); + ranger_cache m_cache; + range_tracer tracer; + basic_block current_bb; + vec m_stmt_list; +}; + +/* Create a new ranger instance and associate it with a function. + Each call must be paired with a call to disable_ranger to release + resources. */ +extern gimple_ranger *enable_ranger (struct function *); +extern void disable_ranger (struct function *); + +#endif // GCC_GIMPLE_RANGE_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-evrp-analyze.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-evrp-analyze.h new file mode 100644 index 0000000..51c3209 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-evrp-analyze.h @@ -0,0 +1,58 @@ +/* Support routines for Value Range Propagation (VRP). + Copyright (C) 2016-2022 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_SSA_EVRP_ANALYZE_H +#define GCC_GIMPLE_SSA_EVRP_ANALYZE_H + +class evrp_range_analyzer : public vr_values +{ + public: + evrp_range_analyzer (bool update_global_ranges); + ~evrp_range_analyzer (void) + { + stack.release (); + } + + void enter (basic_block); + void push_marker (void); + void pop_to_marker (void); + void leave (basic_block); + void record_ranges_from_stmt (gimple *, bool); + + /* Record a new unwindable range. */ + void push_value_range (tree var, value_range_equiv *vr); + + private: + DISABLE_COPY_AND_ASSIGN (evrp_range_analyzer); + + void pop_value_range (); + value_range_equiv *try_find_new_range (tree, tree op, tree_code code, + tree limit); + void record_ranges_from_incoming_edge (basic_block); + void record_ranges_from_phis (basic_block); + void set_ssa_range_info (tree, value_range_equiv *); + + /* STACK holds the old VR. */ + auto_vec > stack; + + /* True if we are updating global ranges, false otherwise. 
*/ + bool m_update_global_ranges; +}; + +#endif /* GCC_GIMPLE_SSA_EVRP_ANALYZE_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-warn-access.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-warn-access.h new file mode 100644 index 0000000..79ff0ff --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-warn-access.h @@ -0,0 +1,48 @@ +/* Pass to detect and issue warnings for invalid accesses, including + invalid or mismatched allocation/deallocation calls. + + Copyright (C) 2020-2022 Free Software Foundation, Inc. + Contributed by Martin Sebor . + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . 
*/ + +#ifndef GCC_GIMPLE_SSA_WARN_ACCESS_H +#define GCC_GIMPLE_SSA_WARN_ACCESS_H + +extern bool check_nul_terminated_array (tree, tree, tree = NULL_TREE); +extern void warn_string_no_nul (location_t, gimple *, const char *, tree, + tree, tree = NULL_TREE, bool = false, + const wide_int[2] = NULL); +extern void warn_string_no_nul (location_t, tree, const char *, tree, + tree, tree = NULL_TREE, bool = false, + const wide_int[2] = NULL); +extern tree unterminated_array (tree, tree * = NULL, bool * = NULL); + +extern bool maybe_warn_nonstring_arg (tree, gimple *); +extern bool maybe_warn_nonstring_arg (tree, tree); + +class access_data; +extern bool maybe_warn_for_bound (opt_code, location_t, gimple *, tree, + tree[2], tree, const access_data * = NULL); +extern bool maybe_warn_for_bound (opt_code, location_t, tree, tree, + tree[2], tree, const access_data * = NULL); + +class access_data; +extern bool check_access (tree, tree, tree, tree, tree, access_mode, + const access_data * = NULL); + +#endif // GCC_GIMPLE_SSA_WARN_ACCESS_H diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-warn-restrict.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-warn-restrict.h new file mode 100644 index 0000000..2bdde03 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa-warn-restrict.h @@ -0,0 +1,29 @@ +/* Warn on violations of the restrict qualifier. + Copyright (C) 2017-2022 Free Software Foundation, Inc. + Contributed by Martin Sebor . + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. 
+ + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#ifndef GIMPLE_SSA_WARN_RESTRICT_H + +extern opt_code check_bounds_or_overlap (gimple *, tree, tree, tree, tree, + bool = false, bool = true); +extern opt_code check_bounds_or_overlap (class pointer_query &, gimple *, + tree, tree, tree, tree, + bool = false, bool = true); + +#endif /* GIMPLE_SSA_WARN_RESTRICT_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa.h new file mode 100644 index 0000000..53bc6bc --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-ssa.h @@ -0,0 +1,201 @@ +/* Header file for routines that straddle the border between GIMPLE and + SSA in gimple. + Copyright (C) 2009-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_SSA_H +#define GCC_GIMPLE_SSA_H + +#include "tree-ssa-operands.h" + +/* This structure is used to map a gimple statement to a label, + or list of labels to represent transaction restart. */ + +struct GTY((for_user)) tm_restart_node { + gimple *stmt; + tree label_or_list; +}; + +/* Hasher for tm_restart_node. 
*/ + +struct tm_restart_hasher : ggc_ptr_hash +{ + static hashval_t hash (tm_restart_node *n) { return htab_hash_pointer (n); } + + static bool + equal (tm_restart_node *a, tm_restart_node *b) + { + return a == b; + } +}; + +extern void gt_ggc_mx (gimple *&); +extern void gt_pch_nx (gimple *&); + +struct ssa_name_hasher : ggc_ptr_hash +{ + /* Hash a tree in a uid_decl_map. */ + + static hashval_t + hash (tree item) + { + return item->ssa_name.var->decl_minimal.uid; + } + + /* Return true if the DECL_UID in both trees are equal. */ + + static bool + equal (tree a, tree b) +{ + return (a->ssa_name.var->decl_minimal.uid == b->ssa_name.var->decl_minimal.uid); +} +}; + +/* Gimple dataflow datastructure. All publicly available fields shall have + gimple_ accessor defined, all publicly modifiable fields should have + gimple_set accessor. */ +struct GTY(()) gimple_df { + /* Array of all SSA_NAMEs used in the function. */ + vec *ssa_names; + + /* Artificial variable used for the virtual operand FUD chain. */ + tree vop; + + /* The PTA solution for the ESCAPED artificial variable. */ + struct pt_solution escaped; + + /* A map of decls to artificial ssa-names that point to the partition + of the decl. */ + hash_map * GTY((skip(""))) decls_to_pointers; + + /* Free list of SSA_NAMEs. */ + vec *free_ssanames; + + /* Queue of SSA_NAMEs to be freed at the next opportunity. */ + vec *free_ssanames_queue; + + /* Hashtable holding definition for symbol. If this field is not NULL, it + means that the first reference to this variable in the function is a + USE or a VUSE. In those cases, the SSA renamer creates an SSA name + for this variable with an empty defining statement. */ + hash_table *default_defs; + + /* True if there are any symbols that need to be renamed. */ + unsigned int ssa_renaming_needed : 1; + + /* True if all virtual operands need to be renamed. */ + unsigned int rename_vops : 1; + + /* True if the code is in ssa form. 
*/ + unsigned int in_ssa_p : 1; + + /* True if IPA points-to information was computed for this function. */ + unsigned int ipa_pta : 1; + + struct ssa_operands ssa_operands; + + /* Map gimple stmt to tree label (or list of labels) for transaction + restart and abort. */ + hash_table *tm_restart; +}; + + +/* Return true when gimple SSA form was built. + gimple_in_ssa_p is queried by gimplifier in various early stages before SSA + infrastructure is initialized. Check for presence of the datastructures + at first place. */ +static inline bool +gimple_in_ssa_p (const struct function *fun) +{ + return fun && fun->gimple_df && fun->gimple_df->in_ssa_p; +} + +/* Artificial variable used for the virtual operand FUD chain. */ +static inline tree +gimple_vop (const struct function *fun) +{ + gcc_checking_assert (fun && fun->gimple_df); + return fun->gimple_df->vop; +} + +/* Return the set of VUSE operand for statement G. */ + +static inline use_operand_p +gimple_vuse_op (const gimple *g) +{ + struct use_optype_d *ops; + const gimple_statement_with_memory_ops *mem_ops_stmt = + dyn_cast (g); + if (!mem_ops_stmt) + return NULL_USE_OPERAND_P; + ops = mem_ops_stmt->use_ops; + if (ops + && USE_OP_PTR (ops)->use == &mem_ops_stmt->vuse) + return USE_OP_PTR (ops); + return NULL_USE_OPERAND_P; +} + +/* Return the set of VDEF operand for statement G. */ + +static inline def_operand_p +gimple_vdef_op (gimple *g) +{ + gimple_statement_with_memory_ops *mem_ops_stmt = + dyn_cast (g); + if (!mem_ops_stmt) + return NULL_DEF_OPERAND_P; + if (mem_ops_stmt->vdef) + return &mem_ops_stmt->vdef; + return NULL_DEF_OPERAND_P; +} + +/* Mark statement S as modified, and update it. */ + +static inline void +update_stmt (gimple *s) +{ + if (gimple_has_ops (s)) + { + gimple_set_modified (s, true); + update_stmt_operands (cfun, s); + } +} + +/* Update statement S if it has been optimized. 
*/ + +static inline void +update_stmt_if_modified (gimple *s) +{ + if (gimple_modified_p (s)) + update_stmt_operands (cfun, s); +} + +/* Mark statement S as modified, and update it. */ + +static inline void +update_stmt_fn (struct function *fn, gimple *s) +{ + if (gimple_has_ops (s)) + { + gimple_set_modified (s, true); + update_stmt_operands (fn, s); + } +} + + +#endif /* GCC_GIMPLE_SSA_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-streamer.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-streamer.h new file mode 100644 index 0000000..aa88ea2 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-streamer.h @@ -0,0 +1,34 @@ +/* Data structures and functions for streaming GIMPLE. + + Copyright (C) 2011-2022 Free Software Foundation, Inc. + Contributed by Diego Novillo + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +#ifndef GCC_GIMPLE_STREAMER_H +#define GCC_GIMPLE_STREAMER_H + +#include "tree-streamer.h" + +/* In gimple-streamer-in.cc */ +void input_bb (class lto_input_block *, enum LTO_tags, class data_in *, + struct function *, int); + +/* In gimple-streamer-out.cc */ +void output_bb (struct output_block *, basic_block, struct function *); + +#endif /* GCC_GIMPLE_STREAMER_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-walk.h b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-walk.h new file mode 100644 index 0000000..bf51c16 --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple-walk.h @@ -0,0 +1,101 @@ +/* Header file for gimple statement walk support. + Copyright (C) 2013-2022 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +#ifndef GCC_GIMPLE_WALK_H +#define GCC_GIMPLE_WALK_H + +/* Convenience routines to walk all statements of a gimple function. + Note that this is useful exclusively before the code is converted + into SSA form. Once the program is in SSA form, the standard + operand interface should be used to analyze/modify statements. */ +struct walk_stmt_info +{ + /* Points to the current statement being walked. */ + gimple_stmt_iterator gsi; + gimple *stmt; + + /* Additional data that the callback functions may want to carry + through the recursion. 
*/ + void *info; + + /* Pointer map used to mark visited tree nodes when calling + walk_tree on each operand. If set to NULL, duplicate tree nodes + will be visited more than once. */ + hash_set *pset; + + /* Operand returned by the callbacks. This is set when calling + walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback + returns non-NULL, this field will contain the tree returned by + the last callback. */ + tree callback_result; + + /* Indicates whether the operand being examined may be replaced + with something that matches is_gimple_val (if true) or something + slightly more complicated (if false). "Something" technically + means the common subset of is_gimple_lvalue and is_gimple_rhs, + but we never try to form anything more complicated than that, so + we don't bother checking. + + Also note that CALLBACK should update this flag while walking the + sub-expressions of a statement. For instance, when walking the + statement 'foo (&var)', the flag VAL_ONLY will initially be set + to true, however, when walking &var, the operand of that + ADDR_EXPR does not need to be a GIMPLE value. */ + BOOL_BITFIELD val_only : 1; + + /* True if we are currently walking the LHS of an assignment. */ + BOOL_BITFIELD is_lhs : 1; + + /* Optional. Set to true by the callback functions if they made any + changes. */ + BOOL_BITFIELD changed : 1; + + /* True if we're interested in location information. */ + BOOL_BITFIELD want_locations : 1; + + /* True if we've removed the statement that was processed. */ + BOOL_BITFIELD removed_stmt : 1; +}; + +/* Callback for walk_gimple_stmt. Called for every statement found + during traversal. The first argument points to the statement to + walk. The second argument is a flag that the callback sets to + 'true' if it the callback handled all the operands and + sub-statements of the statement (the default value of this flag is + 'false'). The third argument is an anonymous pointer to data + to be used by the callback. 
*/ +typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *, + struct walk_stmt_info *); + +extern gimple *walk_gimple_seq_mod (gimple_seq *, walk_stmt_fn, walk_tree_fn, + struct walk_stmt_info *); +extern gimple *walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn, + struct walk_stmt_info *); +extern tree walk_gimple_op (gimple *, walk_tree_fn, struct walk_stmt_info *); +extern tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, + walk_tree_fn, struct walk_stmt_info *); +typedef bool (*walk_stmt_load_store_addr_fn) (gimple *, tree, tree, void *); +extern bool walk_stmt_load_store_addr_ops (gimple *, void *, + walk_stmt_load_store_addr_fn, + walk_stmt_load_store_addr_fn, + walk_stmt_load_store_addr_fn); +extern bool walk_stmt_load_store_ops (gimple *, void *, + walk_stmt_load_store_addr_fn, + walk_stmt_load_store_addr_fn); +#endif /* GCC_GIMPLE_WALK_H */ diff --git a/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple.def b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple.def new file mode 100644 index 0000000..296c73c --- /dev/null +++ b/lib/gcc/x86_64-linux-musl/12.2.0/plugin/include/gimple.def @@ -0,0 +1,408 @@ +/* This file contains the definitions of the GIMPLE IR tuples used in GCC. + + Copyright (C) 2007-2022 Free Software Foundation, Inc. + Contributed by Aldy Hernandez + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. 
*/ + +/* The format of this file is + DEFGSCODE(GIMPLE_symbol, printable name, GSS_symbol). */ + + +/* Error marker. This is used in similar ways as ERROR_MARK in tree.def. */ +DEFGSCODE(GIMPLE_ERROR_MARK, "gimple_error_mark", GSS_BASE) + +/* IMPORTANT. Do not rearrange the codes between GIMPLE_COND and + GIMPLE_RETURN. The ordering is exposed by gimple_has_ops calls. + These are all the GIMPLE statements with register operands. */ + +/* GIMPLE_COND + represents the conditional jump: + + if (OP1 COND_CODE OP2) goto TRUE_LABEL else goto FALSE_LABEL + + COND_CODE is the tree code used as the comparison predicate. It + must be of class tcc_comparison. + + OP1 and OP2 are the operands used in the comparison. They must be + accepted by is_gimple_operand. + + TRUE_LABEL and FALSE_LABEL are the LABEL_DECL nodes used as the + jump target for the comparison. */ +DEFGSCODE(GIMPLE_COND, "gimple_cond", GSS_WITH_OPS) + +/* GIMPLE_DEBUG represents a debug statement. */ +DEFGSCODE(GIMPLE_DEBUG, "gimple_debug", GSS_WITH_OPS) + +/* GIMPLE_GOTO represents unconditional jumps. + TARGET is a LABEL_DECL or an expression node for computed GOTOs. */ +DEFGSCODE(GIMPLE_GOTO, "gimple_goto", GSS_WITH_OPS) + +/* GIMPLE_LABEL